kombu-4.1.0/0000755000175000017500000000000013134154263012561 5ustar omeromer00000000000000kombu-4.1.0/MANIFEST.in0000644000175000017500000000100313130603207014302 0ustar omeromer00000000000000include AUTHORS include Changelog include FAQ include INSTALL include LICENSE include MANIFEST.in include README.rst include README include THANKS include TODO include setup.cfg recursive-include extra * recursive-include docs * recursive-include kombu *.py recursive-include t *.py recursive-include requirements *.txt recursive-include funtests *.py setup.cfg recursive-include examples *.py recursive-exclude docs/_build * recursive-exclude * __pycache__ recursive-exclude * *.py[co] recursive-exclude * .*.sw* kombu-4.1.0/TODO0000644000175000017500000000012213130603207013245 0ustar omeromer00000000000000Please see our Issue Tracker at GitHub: http://github.com/celery/kombu/issues kombu-4.1.0/FAQ0000644000175000017500000000066413130603207013112 0ustar omeromer00000000000000============================ Frequently Asked Questions ============================ Questions ========= Q: Message.reject doesn't work? -------------------------------------- **Answer**: Earlier versions of RabbitMQ did not implement ``basic.reject``, so make sure your version is recent enough to support it. Q: Message.requeue doesn't work? -------------------------------------- **Answer**: See `Message.reject doesn't work?`_ kombu-4.1.0/t/0000755000175000017500000000000013134154263013024 5ustar omeromer00000000000000kombu-4.1.0/t/mocks.py0000644000175000017500000001127113134153516014514 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from itertools import count from case import ContextMock, Mock from kombu.transport import base from kombu.utils import json def PromiseMock(*args, **kwargs): m = Mock(*args, **kwargs) def on_throw(exc=None, *args, **kwargs): if exc: raise exc raise m.throw.side_effect = on_throw m.set_error_state.side_effect = on_throw m.throw1.side_effect = on_throw return m class MockPool(object): def __init__(self, value=None): self.value = value or ContextMock() def acquire(self, **kwargs): return self.value class Message(base.Message): def __init__(self, *args, **kwargs): self.throw_decode_error = kwargs.get('throw_decode_error', False) super(Message, self).__init__(*args, **kwargs) def decode(self): if self.throw_decode_error: raise ValueError("can't decode message") return super(Message, self).decode() class Channel(base.StdChannel): open = True throw_decode_error = False _ids = count(1) def __init__(self, connection): self.connection = connection self.called = [] self.deliveries = count(1) self.to_deliver = [] self.events = {'basic_return': set()} self.channel_id = next(self._ids) def _called(self, name): self.called.append(name) def __contains__(self, key): return key in self.called def exchange_declare(self, *args, **kwargs): self._called('exchange_declare') def prepare_message(self, body, priority=0, content_type=None, content_encoding=None, headers=None, properties={}): self._called('prepare_message') return dict(body=body, headers=headers, properties=properties, priority=priority, content_type=content_type, content_encoding=content_encoding) def basic_publish(self, message, exchange='', routing_key='', mandatory=False, immediate=False, **kwargs): self._called('basic_publish') return message, exchange, routing_key def exchange_delete(self, *args, **kwargs): self._called('exchange_delete') def queue_declare(self, *args, **kwargs): self._called('queue_declare') def 
queue_bind(self, *args, **kwargs): self._called('queue_bind') def queue_unbind(self, *args, **kwargs): self._called('queue_unbind') def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): self._called('queue_delete') def basic_get(self, *args, **kwargs): self._called('basic_get') try: return self.to_deliver.pop() except IndexError: pass def queue_purge(self, *args, **kwargs): self._called('queue_purge') def basic_consume(self, *args, **kwargs): self._called('basic_consume') def basic_cancel(self, *args, **kwargs): self._called('basic_cancel') def basic_ack(self, *args, **kwargs): self._called('basic_ack') def basic_recover(self, requeue=False): self._called('basic_recover') def exchange_bind(self, *args, **kwargs): self._called('exchange_bind') def exchange_unbind(self, *args, **kwargs): self._called('exchange_unbind') def close(self): self._called('close') def message_to_python(self, message, *args, **kwargs): self._called('message_to_python') return Message(body=json.dumps(message), channel=self, delivery_tag=next(self.deliveries), throw_decode_error=self.throw_decode_error, content_type='application/json', content_encoding='utf-8') def flow(self, active): self._called('flow') def basic_reject(self, delivery_tag, requeue=False): if requeue: return self._called('basic_reject:requeue') return self._called('basic_reject') def basic_qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): self._called('basic_qos') class Connection(object): connected = True def __init__(self, client): self.client = client def channel(self): return Channel(self) class Transport(base.Transport): def establish_connection(self): return Connection(self.client) def create_channel(self, connection): return connection.channel() def drain_events(self, connection, **kwargs): return 'event' def close_connection(self, connection): connection.connected = False kombu-4.1.0/t/unit/0000755000175000017500000000000013134154263014003 5ustar omeromer00000000000000kombu-4.1.0/t/unit/test_message.py0000644000175000017500000000242213134153516017040 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import sys from case import Mock, patch from kombu.message import Message class test_Message: def test_repr(self): assert repr(Message('b', channel=Mock())) def test_decode(self): m = Message('body', channel=Mock()) decode = m._decode = Mock() assert m._decoded_cache is None assert m.decode() is m._decode.return_value assert m._decoded_cache is m._decode.return_value m._decode.assert_called_with() m._decode = Mock() assert m.decode() is decode.return_value def test_reraise_error(self): m = Message('body', channel=Mock()) callback = Mock(name='callback') try: raise KeyError('foo') except KeyError: m.errors.append(sys.exc_info()) m._reraise_error(callback) callback.assert_called() with pytest.raises(KeyError): m._reraise_error(None) @patch('kombu.message.decompress') def test_decompression_stores_error(self, decompress): decompress.side_effect = RuntimeError() m = Message('body', channel=Mock(), headers={'compression': 'zlib'}) with pytest.raises(RuntimeError): m._reraise_error(None) kombu-4.1.0/t/unit/test_exceptions.py0000644000175000017500000000035213130603207017566 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import Mock from kombu.exceptions import HttpError class test_HttpError: def test_str(self): assert str(HttpError(200, 'msg', Mock(name='response'))) 
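# A hedged sketch of what the assertion above relies on -- HttpError is
# assumed here to carry (code, message, response) and to include the
# status code in its string form; the 404 values are illustrative only:
#
#     err = HttpError(404, 'Not Found', response=None)
#     assert '404' in str(err)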
kombu-4.1.0/t/unit/__init__.py0000644000175000017500000000007113130603207016103 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals kombu-4.1.0/t/unit/transport/0000755000175000017500000000000013134154263016037 5ustar omeromer00000000000000kombu-4.1.0/t/unit/transport/test_memory.py0000644000175000017500000001172513130603207020757 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket from kombu import Connection, Exchange, Queue, Consumer, Producer class test_MemoryTransport: def setup(self): self.c = Connection(transport='memory') self.e = Exchange('test_transport_memory') self.q = Queue('test_transport_memory', exchange=self.e, routing_key='test_transport_memory') self.q2 = Queue('test_transport_memory2', exchange=self.e, routing_key='test_transport_memory2') self.fanout = Exchange('test_transport_memory_fanout', type='fanout') self.q3 = Queue('test_transport_memory_fanout1', exchange=self.fanout) self.q4 = Queue('test_transport_memory_fanout2', exchange=self.fanout) def test_driver_version(self): assert self.c.transport.driver_version() def test_produce_consume_noack(self): channel = self.c.channel() producer = Producer(channel, self.e) consumer = Consumer(channel, self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume_fanout(self): producer = self.c.Producer() consumer = self.c.Consumer([self.q3, self.q4]) producer.publish( {'hello': 'world'}, declare=consumer.queues, exchange=self.fanout, ) assert self.q3(self.c).get().payload == {'hello': 'world'} assert self.q4(self.c).get().payload == {'hello': 'world'} assert self.q3(self.c).get() is None assert self.q4(self.c).get() is None def test_produce_consume(self): channel = self.c.channel() producer = Producer(channel, self.e) consumer1 = Consumer(channel, self.q) consumer2 = Consumer(channel, self.q2) self.q2(channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_memory', compression='zlib') m = self.q(channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory') assert self.q(channel).get() self.q(channel).delete() self.q(channel).declare() assert self.q(channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_memory2') assert self.q2(channel).get() self.q2(channel).purge() assert self.q2(channel).get() is None def test_drain_events(self): with pytest.raises(socket.timeout): self.c.drain_events(timeout=0.1) c1 = self.c.channel() c2 = self.c.channel() with 
pytest.raises(socket.timeout): self.c.drain_events(timeout=0.1) del(c1) # so pyflakes doesn't complain. del(c2) def test_drain_events_unregistered_queue(self): c1 = self.c.channel() producer = self.c.Producer() consumer = self.c.Consumer([self.q2]) producer.publish( {'hello': 'world'}, declare=consumer.queues, routing_key=self.q2.routing_key, exchange=self.q2.exchange, ) message = consumer.queues[0].get()._raw class Cycle(object): def get(self, callback, timeout=None): return (message, 'foo'), c1 self.c.transport.cycle = Cycle() self.c.drain_events() def test_queue_for(self): chan = self.c.channel() chan.queues.clear() x = chan._queue_for('foo') assert x assert chan._queue_for('foo') is x kombu-4.1.0/t/unit/transport/test_librabbitmq.py0000644000175000017500000001066313130603207021737 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch, skip try: import librabbitmq except ImportError: librabbitmq = None # noqa else: from kombu.transport import librabbitmq # noqa @skip.unless_module('librabbitmq') class lrmqCase: pass class test_Message(lrmqCase): def test_init(self): chan = Mock(name='channel') message = librabbitmq.Message( chan, {'prop': 42}, {'delivery_tag': 337}, 'body', ) assert message.body == 'body' assert message.delivery_tag == 337 assert message.properties['prop'] == 42 class test_Channel(lrmqCase): def test_prepare_message(self): conn = Mock(name='connection') chan = librabbitmq.Channel(conn, 1) assert chan body = 'the quick brown fox...' properties = {'name': 'Elaine M.'} body2, props2 = chan.prepare_message( body, properties=properties, priority=999, content_type='ctype', content_encoding='cenc', headers={'H': 2}, ) assert props2['name'] == 'Elaine M.' assert props2['priority'] == 999 assert props2['content_type'] == 'ctype' assert props2['content_encoding'] == 'cenc' assert props2['headers'] == {'H': 2} assert body2 == body body3, props3 = chan.prepare_message(body, priority=777) assert props3['priority'] == 777 assert body3 == body class test_Transport(lrmqCase): def setup(self): self.client = Mock(name='client') self.T = librabbitmq.Transport(self.client) def test_driver_version(self): assert self.T.driver_version() def test_create_channel(self): conn = Mock(name='connection') chan = self.T.create_channel(conn) assert chan conn.channel.assert_called_with() def test_drain_events(self): conn = Mock(name='connection') self.T.drain_events(conn, timeout=1.33) conn.drain_events.assert_called_with(timeout=1.33) def test_establish_connection_SSL_not_supported(self): self.client.ssl = True with pytest.raises(NotImplementedError): self.T.establish_connection() def test_establish_connection(self): self.T.Connection = Mock(name='Connection') self.T.client.ssl = False self.T.client.port = None self.T.client.transport_options = {} conn = self.T.establish_connection() assert self.T.client.port == self.T.default_connection_params['port'] assert conn.client == self.T.client assert self.T.client.drain_events == conn.drain_events def test_collect__no_conn(self): self.T.client.drain_events = 1234 self.T._collect(None) assert self.client.drain_events is None assert self.T.client is None def test_collect__with_conn(self): self.T.client.drain_events = 1234 conn = Mock(name='connection') chans = conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')} conn.callbacks = {'foo': Mock(name='cb1'), 'bar': Mock(name='cb2')} for i, chan in enumerate(conn.channels.values()): chan.connection = i with patch('os.close') as 
close: self.T._collect(conn) close.assert_called_with(conn.fileno()) assert not conn.channels assert not conn.callbacks for chan in chans.values(): assert chan.connection is None assert self.client.drain_events is None assert self.T.client is None with patch('os.close') as close: self.T.client = self.client close.side_effect = OSError() self.T._collect(conn) close.assert_called_with(conn.fileno()) def test_register_with_event_loop(self): conn = Mock(name='conn') loop = Mock(name='loop') self.T.register_with_event_loop(conn, loop) loop.add_reader.assert_called_with( conn.fileno(), self.T.on_readable, conn, loop, ) def test_verify_connection(self): conn = Mock(name='connection') conn.connected = True assert self.T.verify_connection(conn) def test_close_connection(self): conn = Mock(name='connection') self.client.drain_events = 1234 self.T.close_connection(conn) assert self.client.drain_events is None conn.close.assert_called_with() kombu-4.1.0/t/unit/transport/virtual/0000755000175000017500000000000013134154263017525 5ustar omeromer00000000000000kombu-4.1.0/t/unit/transport/virtual/__init__.py0000644000175000017500000000000013130603207021615 0ustar omeromer00000000000000kombu-4.1.0/t/unit/transport/virtual/test_base.py0000644000175000017500000004661113130603207022051 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import io import pytest import sys import warnings from case import MagicMock, Mock, patch from kombu import Connection from kombu.compression import compress from kombu.exceptions import ResourceError, ChannelError from kombu.transport import virtual from kombu.utils.uuid import uuid PY3 = sys.version_info[0] == 3 PRINT_FQDN = 'builtins.print' if PY3 else '__builtin__.print' def client(**kwargs): return Connection(transport='kombu.transport.virtual:Transport', **kwargs) def memory_client(): return Connection(transport='memory') def test_BrokerState(): s = virtual.BrokerState() assert hasattr(s, 'exchanges') t = virtual.BrokerState(exchanges=16) assert t.exchanges == 16 class test_QoS: def setup(self): self.q = virtual.QoS(client().channel(), prefetch_count=10) def teardown(self): self.q._on_collect.cancel() def test_constructor(self): assert self.q.channel assert self.q.prefetch_count assert not self.q._delivered.restored assert self.q._on_collect def test_restore_visible__interface(self): qos = virtual.QoS(client().channel()) qos.restore_visible() def test_can_consume(self, stdouts): stderr = io.StringIO() _restored = [] class RestoreChannel(virtual.Channel): do_restore = True def _restore(self, message): _restored.append(message) assert self.q.can_consume() for i in range(self.q.prefetch_count - 1): self.q.append(i, uuid()) assert self.q.can_consume() self.q.append(i + 1, uuid()) assert not self.q.can_consume() tag1 = next(iter(self.q._delivered)) self.q.ack(tag1) assert self.q.can_consume() tag2 = uuid() self.q.append(i + 2, tag2) assert not self.q.can_consume() self.q.reject(tag2) assert self.q.can_consume() self.q.channel = RestoreChannel(self.q.channel.connection) tag3 = uuid() self.q.append(i + 3, tag3) self.q.reject(tag3, requeue=True) self.q._flush() assert self.q._delivered assert not self.q._delivered.restored self.q.restore_unacked_once(stderr=stderr) assert _restored == [11, 9, 8, 7, 6, 5, 4, 3, 2, 1] assert self.q._delivered.restored assert not self.q._delivered self.q.restore_unacked_once(stderr=stderr) self.q._delivered.restored = False self.q.restore_unacked_once(stderr=stderr) assert stderr.getvalue() assert not 
stdouts.stdout.getvalue() self.q.restore_at_shutdown = False self.q.restore_unacked_once() def test_get(self): self.q._delivered['foo'] = 1 assert self.q.get('foo') == 1 class test_Message: def test_create(self): c = client().channel() data = c.prepare_message('the quick brown fox...') tag = data['properties']['delivery_tag'] = uuid() message = c.message_to_python(data) assert isinstance(message, virtual.Message) assert message is c.message_to_python(message) if message.errors: message._reraise_error() assert message.body == 'the quick brown fox...'.encode('utf-8') assert message.delivery_tag, tag def test_create_no_body(self): virtual.Message(channel=Mock(), payload={ 'body': None, 'properties': {'delivery_tag': 1}, }) def test_serializable(self): c = client().channel() body, content_type = compress('the quick brown fox...', 'gzip') data = c.prepare_message(body, headers={'compression': content_type}) tag = data['properties']['delivery_tag'] = uuid() message = c.message_to_python(data) dict_ = message.serializable() assert dict_['body'] == 'the quick brown fox...'.encode('utf-8') assert dict_['properties']['delivery_tag'] == tag assert 'compression' not in dict_['headers'] class test_AbstractChannel: def test_get(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._get('queue') def test_put(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._put('queue', 'm') def test_size(self): assert virtual.AbstractChannel()._size('queue') == 0 def test_purge(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._purge('queue') def test_delete(self): with pytest.raises(NotImplementedError): virtual.AbstractChannel()._delete('queue') def test_new_queue(self): assert virtual.AbstractChannel()._new_queue('queue') is None def test_has_queue(self): assert virtual.AbstractChannel()._has_queue('queue') def test_poll(self): cycle = Mock(name='cycle') assert virtual.AbstractChannel()._poll(cycle, Mock()) cycle.get.assert_called() class test_Channel: def setup(self): self.channel = client().channel() def teardown(self): if self.channel._qos is not None: self.channel._qos._on_collect.cancel() def test_exceeds_channel_max(self): c = client() t = c.transport avail = t._avail_channel_ids = Mock(name='_avail_channel_ids') avail.pop.side_effect = IndexError() with pytest.raises(ResourceError): virtual.Channel(t) def test_exchange_bind_interface(self): with pytest.raises(NotImplementedError): self.channel.exchange_bind('dest', 'src', 'key') def test_exchange_unbind_interface(self): with pytest.raises(NotImplementedError): self.channel.exchange_unbind('dest', 'src', 'key') def test_queue_unbind_interface(self): self.channel.queue_unbind('dest', 'ex', 'key') def test_management(self): m = self.channel.connection.client.get_manager() assert m m.get_bindings() m.close() def test_exchange_declare(self): c = self.channel with pytest.raises(ChannelError): c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True, passive=True) c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True) c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True, passive=True) assert 'test_exchange_declare' in c.state.exchanges # can declare again with same values c.exchange_declare('test_exchange_declare', 'direct', durable=True, auto_delete=True) assert 'test_exchange_declare' in c.state.exchanges # using different values raises NotEquivalentError with pytest.raises(virtual.NotEquivalentError): 
c.exchange_declare('test_exchange_declare', 'direct', durable=False, auto_delete=True) def test_exchange_delete(self, ex='test_exchange_delete'): class PurgeChannel(virtual.Channel): purged = [] def _purge(self, queue): self.purged.append(queue) c = PurgeChannel(self.channel.connection) c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) assert ex in c.state.exchanges assert not c.state.has_binding(ex, ex, ex) # no bindings yet c.exchange_delete(ex) assert ex not in c.state.exchanges c.exchange_declare(ex, 'direct', durable=True, auto_delete=True) c.queue_declare(ex) c.queue_bind(ex, ex, ex) assert c.state.has_binding(ex, ex, ex) c.exchange_delete(ex) assert not c.state.has_binding(ex, ex, ex) assert ex in c.purged def test_queue_delete__if_empty(self, n='test_queue_delete__if_empty'): class PurgeChannel(virtual.Channel): purged = [] size = 30 def _purge(self, queue): self.purged.append(queue) def _size(self, queue): return self.size c = PurgeChannel(self.channel.connection) c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) # tests code path that returns if queue already bound. c.queue_bind(n, n, n) c.queue_delete(n, if_empty=True) assert c.state.has_binding(n, n, n) c.size = 0 c.queue_delete(n, if_empty=True) assert not c.state.has_binding(n, n, n) assert n in c.purged def test_queue_purge(self, n='test_queue_purge'): class PurgeChannel(virtual.Channel): purged = [] def _purge(self, queue): self.purged.append(queue) c = PurgeChannel(self.channel.connection) c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) c.queue_purge(n) assert n in c.purged def test_basic_publish__anon_exchange(self): c = memory_client().channel() msg = MagicMock(name='msg') c.encode_body = Mock(name='c.encode_body') c.encode_body.return_value = (1, 2) c._put = Mock(name='c._put') c.basic_publish(msg, None, 'rkey', kw=1) c._put.assert_called_with('rkey', msg, kw=1) def test_basic_publish_unique_delivery_tags(self, n='test_uniq_tag'): c1 = memory_client().channel() c2 = memory_client().channel() for c in (c1, c2): c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) m1 = c1.prepare_message('George Costanza') m2 = c2.prepare_message('Elaine Marie Benes') c1.basic_publish(m1, n, n) c2.basic_publish(m2, n, n) r1 = c1.message_to_python(c1.basic_get(n)) r2 = c2.message_to_python(c2.basic_get(n)) assert r1.delivery_tag != r2.delivery_tag with pytest.raises(ValueError): int(r1.delivery_tag) with pytest.raises(ValueError): int(r2.delivery_tag) def test_basic_publish__get__consume__restore(self, n='test_basic_publish'): c = memory_client().channel() c.exchange_declare(n) c.queue_declare(n) c.queue_bind(n, n, n) c.queue_declare(n + '2') c.queue_bind(n + '2', n, n) messages = [] c.connection._deliver = Mock(name='_deliver') def on_deliver(message, queue): messages.append(message) c.connection._deliver.side_effect = on_deliver m = c.prepare_message('nthex quick brown fox...') c.basic_publish(m, n, n) r1 = c.message_to_python(c.basic_get(n)) assert r1 assert r1.body == 'nthex quick brown fox...'.encode('utf-8') assert c.basic_get(n) is None consumer_tag = uuid() c.basic_consume(n + '2', False, consumer_tag=consumer_tag, callback=lambda *a: None) assert n + '2' in c._active_queues c.drain_events() r2 = c.message_to_python(messages[-1]) assert r2.body == 'nthex quick brown fox...'.encode('utf-8') assert r2.delivery_info['exchange'] == n assert r2.delivery_info['routing_key'] == n with pytest.raises(virtual.Empty): c.drain_events() c.basic_cancel(consumer_tag) c._restore(r2) r3 = 
c.message_to_python(c.basic_get(n)) assert r3 assert r3.body == 'nthex quick brown fox...'.encode('utf-8') assert c.basic_get(n) is None def test_basic_ack(self): class MockQoS(virtual.QoS): was_acked = False def ack(self, delivery_tag): self.was_acked = True self.channel._qos = MockQoS(self.channel) self.channel.basic_ack('foo') assert self.channel._qos.was_acked def test_basic_recover__requeue(self): class MockQoS(virtual.QoS): was_restored = False def restore_unacked(self): self.was_restored = True self.channel._qos = MockQoS(self.channel) self.channel.basic_recover(requeue=True) assert self.channel._qos.was_restored def test_restore_unacked_raises_BaseException(self): q = self.channel.qos q._flush = Mock() q._delivered = {1: 1} q.channel._restore = Mock() q.channel._restore.side_effect = SystemExit errors = q.restore_unacked() assert isinstance(errors[0][0], SystemExit) assert errors[0][1] == 1 assert not q._delivered @patch('kombu.transport.virtual.base.emergency_dump_state') @patch(PRINT_FQDN) def test_restore_unacked_once_when_unrestored(self, print_, emergency_dump_state): q = self.channel.qos q._flush = Mock() class State(dict): restored = False q._delivered = State({1: 1}) ru = q.restore_unacked = Mock() exc = None try: raise KeyError() except KeyError as exc_: exc = exc_ ru.return_value = [(exc, 1)] self.channel.do_restore = True q.restore_unacked_once() print_.assert_called() emergency_dump_state.assert_called() def test_basic_recover(self): with pytest.raises(NotImplementedError): self.channel.basic_recover(requeue=False) def test_basic_reject(self): class MockQoS(virtual.QoS): was_rejected = False def reject(self, delivery_tag, requeue=False): self.was_rejected = True self.channel._qos = MockQoS(self.channel) self.channel.basic_reject('foo') assert self.channel._qos.was_rejected def test_basic_qos(self): self.channel.basic_qos(prefetch_count=128) assert self.channel._qos.prefetch_count == 128 def test_lookup__undeliverable(self, n='test_lookup__undeliverable'): warnings.resetwarnings() with warnings.catch_warnings(record=True) as log: assert self.channel._lookup(n, n, 'ae.undeliver') == [ 'ae.undeliver', ] assert log assert 'could not be delivered' in log[0].message.args[0] def test_context(self): x = self.channel.__enter__() assert x is self.channel x.__exit__() assert x.closed def test_cycle_property(self): assert self.channel.cycle def test_flow(self): with pytest.raises(NotImplementedError): self.channel.flow(False) def test_close_when_no_connection(self): self.channel.connection = None self.channel.close() assert self.channel.closed def test_drain_events_has_get_many(self): c = self.channel c._get_many = Mock() c._poll = Mock() c._consumers = [1] c._qos = Mock() c._qos.can_consume.return_value = True c.drain_events(timeout=10.0) c._get_many.assert_called_with(c._active_queues, timeout=10.0) def test_get_exchanges(self): self.channel.exchange_declare(exchange='unique_name') assert self.channel.get_exchanges() def test_basic_cancel_not_in_active_queues(self): c = self.channel c._consumers.add('x') c._tag_to_queue['x'] = 'foo' c._active_queues = Mock() c._active_queues.remove.side_effect = ValueError() c.basic_cancel('x') c._active_queues.remove.assert_called_with('foo') def test_basic_cancel_unknown_ctag(self): assert self.channel.basic_cancel('unknown-tag') is None def test_list_bindings(self): c = self.channel c.exchange_declare(exchange='unique_name') c.queue_declare(queue='q') c.queue_bind(queue='q', exchange='unique_name', routing_key='rk') assert ('q', 'unique_name', 
'rk') in list(c.list_bindings()) def test_after_reply_message_received(self): c = self.channel c.queue_delete = Mock() c.after_reply_message_received('foo') c.queue_delete.assert_called_with('foo') def test_queue_delete_unknown_queue(self): assert self.channel.queue_delete('xiwjqjwel') is None def test_queue_declare_passive(self): has_queue = self.channel._has_queue = Mock() has_queue.return_value = False with pytest.raises(ChannelError): self.channel.queue_declare(queue='21wisdjwqe', passive=True) def test_get_message_priority(self): def _message(priority): return self.channel.prepare_message( 'the message with priority', priority=priority, ) assert self.channel._get_message_priority(_message(5)) == 5 assert self.channel._get_message_priority( _message(self.channel.min_priority - 10) ) == self.channel.min_priority assert self.channel._get_message_priority( _message(self.channel.max_priority + 10), ) == self.channel.max_priority assert self.channel._get_message_priority( _message('foobar'), ) == self.channel.default_priority assert self.channel._get_message_priority( _message(2), reverse=True, ) == self.channel.max_priority - 2 class test_Transport: def setup(self): self.transport = client().transport def test_custom_polling_interval(self): x = client(transport_options=dict(polling_interval=32.3)) assert x.transport.polling_interval == 32.3 def test_close_connection(self): c1 = self.transport.create_channel(self.transport) c2 = self.transport.create_channel(self.transport) assert len(self.transport.channels) == 2 self.transport.close_connection(self.transport) assert not self.transport.channels del(c1) # so pyflakes doesn't complain del(c2) def test_drain_channel(self): channel = self.transport.create_channel(self.transport) with pytest.raises(virtual.Empty): self.transport._drain_channel(channel, Mock()) def test__deliver__no_queue(self): with pytest.raises(KeyError): self.transport._deliver(Mock(name='msg'), queue=None) def test__reject_inbound_message(self): channel = Mock(name='channel') self.transport.channels = [None, channel] self.transport._reject_inbound_message({'foo': 'bar'}) channel.Message.assert_called_with({'foo': 'bar'}, channel=channel) channel.qos.append.assert_called_with( channel.Message(), channel.Message().delivery_tag, ) channel.basic_reject.assert_called_with( channel.Message().delivery_tag, requeue=True, ) def test_on_message_ready(self): channel = Mock(name='channel') msg = Mock(name='msg') callback = Mock(name='callback') self.transport._callbacks = {'q1': callback} self.transport.on_message_ready(channel, msg, queue='q1') callback.assert_called_with(msg) def test_on_message_ready__no_queue(self): with pytest.raises(KeyError): self.transport.on_message_ready( Mock(name='channel'), Mock(name='msg'), queue=None) def test_on_message_ready__no_callback(self): self.transport._callbacks = {} with pytest.raises(KeyError): self.transport.on_message_ready( Mock(name='channel'), Mock(name='msg'), queue='q1') kombu-4.1.0/t/unit/transport/virtual/test_exchange.py0000644000175000017500000001266213130603207022720 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu import Connection from kombu.transport.virtual import exchange from t.mocks import Transport class ExchangeCase: type = None def setup(self): if self.type: self.e = self.type(Connection(transport=Transport).channel()) class test_Direct(ExchangeCase): type = exchange.DirectExchange table = [('rFoo', None, 'qFoo'), ('rFoo', None, 'qFox'), 
('rBar', None, 'qBar'), ('rBaz', None, 'qBaz')] @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'rFoo', None, {'qFoo', 'qFox'}), ('eMoz', 'rMoz', 'DEFAULT', set()), ('eBar', 'rBar', None, {'qBar'}), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e.lookup( self.table, exchange, routing_key, default) == expected class test_Fanout(ExchangeCase): type = exchange.FanoutExchange table = [(None, None, 'qFoo'), (None, None, 'qFox'), (None, None, 'qBar')] def test_lookup(self): assert self.e.lookup(self.table, 'eFoo', 'rFoo', None) == { 'qFoo', 'qFox', 'qBar', } def test_deliver_when_fanout_supported(self): self.e.channel = Mock() self.e.channel.supports_fanout = True message = Mock() self.e.deliver(message, 'exchange', 'rkey') self.e.channel._put_fanout.assert_called_with( 'exchange', message, 'rkey', ) def test_deliver_when_fanout_unsupported(self): self.e.channel = Mock() self.e.channel.supports_fanout = False self.e.deliver(Mock(), 'exchange', None) self.e.channel._put_fanout.assert_not_called() class test_Topic(ExchangeCase): type = exchange.TopicExchange table = [ ('stock.#', None, 'rFoo'), ('stock.us.*', None, 'rBar'), ] def setup(self): ExchangeCase.setup(self) self.table = [(rkey, self.e.key_to_pattern(rkey), queue) for rkey, _, queue in self.table] def test_prepare_bind(self): x = self.e.prepare_bind('qFoo', 'eFoo', 'stock.#', {}) assert x == ('stock.#', r'^stock\..*?$', 'qFoo') @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'stock.us.nasdaq', None, {'rFoo', 'rBar'}), ('eFoo', 'stock.europe.OSE', None, {'rFoo'}), ('eFoo', 'stockxeuropexOSE', None, set()), ('eFoo', 'candy.schleckpulver.snap_crackle', None, set()), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e.lookup( self.table, exchange, routing_key, default) == expected assert self.e._compiled def test_deliver(self): self.e.channel = Mock() self.e.channel._lookup.return_value = ('a', 'b') message = Mock() self.e.deliver(message, 'exchange', 'rkey') assert self.e.channel._put.call_args_list == [ (('a', message), {}), (('b', message), {}), ] class test_TopicMultibind(ExchangeCase): # Testing message delivery in case of multiple overlapping # bindings for the same queue. As AMQP states, in case of # overlapping bindings, a message must be delivered once to # each matching queue. 
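# A minimal sketch of the deduplication relied on below, assuming
# key_to_pattern() yields anchored regex strings of the kind shown in
# test_Topic.test_prepare_bind:
#
#     import re
#     matches = {queue for _, pattern, queue in self.table
#                if re.match(pattern, 'stock.us.nasdaq')}
#     assert matches == {'rFoo'}  # one entry, despite several matches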
type = exchange.TopicExchange table = [ ('stock', None, 'rFoo'), ('stock.#', None, 'rFoo'), ('stock.us.*', None, 'rFoo'), ('#', None, 'rFoo'), ] def setup(self): ExchangeCase.setup(self) self.table = [(rkey, self.e.key_to_pattern(rkey), queue) for rkey, _, queue in self.table] @pytest.mark.parametrize('exchange,routing_key,default,expected', [ ('eFoo', 'stock.us.nasdaq', None, {'rFoo'}), ('eFoo', 'stock.europe.OSE', None, {'rFoo'}), ('eFoo', 'stockxeuropexOSE', None, {'rFoo'}), ('eFoo', 'candy.schleckpulver.snap_crackle', None, {'rFoo'}), ]) def test_lookup(self, exchange, routing_key, default, expected): assert self.e._compiled assert self.e.lookup( self.table, exchange, routing_key, default) == expected class test_ExchangeType(ExchangeCase): type = exchange.ExchangeType def test_lookup(self): with pytest.raises(NotImplementedError): self.e.lookup([], 'eFoo', 'rFoo', None) def test_prepare_bind(self): assert self.e.prepare_bind('qFoo', 'eFoo', 'rFoo', {}) == ( 'rFoo', None, 'qFoo', ) e1 = dict( type='direct', durable=True, auto_delete=True, arguments={}, ) e2 = dict(e1, arguments={'expires': 3000}) @pytest.mark.parametrize('ex,eq,name,type,durable,auto_delete,arguments', [ (e1, True, 'eFoo', 'direct', True, True, {}), (e1, False, 'eFoo', 'topic', True, True, {}), (e1, False, 'eFoo', 'direct', False, True, {}), (e1, False, 'eFoo', 'direct', True, False, {}), (e1, False, 'eFoo', 'direct', True, True, {'expires': 3000}), (e2, True, 'eFoo', 'direct', True, True, {'expires': 3000}), (e2, False, 'eFoo', 'direct', True, True, {'expires': 6000}), ]) def test_equivalent( self, ex, eq, name, type, durable, auto_delete, arguments): is_eq = self.e.equivalent( ex, name, type, durable, auto_delete, arguments) assert is_eq if eq else not is_eq kombu-4.1.0/t/unit/transport/test_sqlalchemy.py0000644000175000017500000000302313130603207021601 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import patch, skip from kombu import Connection @skip.unless_module('sqlalchemy') class test_SqlAlchemy: def test_url_parser(self): with patch('kombu.transport.sqlalchemy.Channel._open'): url = 'sqlalchemy+sqlite:///celerydb.sqlite' Connection(url).connect() url = 'sqla+sqlite:///celerydb.sqlite' Connection(url).connect() url = 'sqlb+sqlite:///celerydb.sqlite' with pytest.raises(KeyError): Connection(url).connect() def test_simple_queueing(self): conn = Connection('sqlalchemy+sqlite:///:memory:') conn.connect() try: channel = conn.channel() assert channel.queue_cls.__table__.name == 'kombu_queue' assert channel.message_cls.__table__.name == 'kombu_message' channel._put('celery', 'DATA_SIMPLE_QUEUEING') assert channel._get('celery') == 'DATA_SIMPLE_QUEUEING' finally: conn.release() def test_clone(self): hostname = 'sqlite:///celerydb.sqlite' x = Connection('+'.join(['sqla', hostname])) try: assert x.uri_prefix == 'sqla' assert x.hostname == hostname clone = x.clone() try: assert clone.hostname == hostname assert clone.uri_prefix == 'sqla' finally: clone.release() finally: x.release() kombu-4.1.0/t/unit/transport/test_SQS.py0000644000175000017500000003412313134153527020123 0ustar omeromer00000000000000"""Testing module for the kombu.transport.SQS package. NOTE: The SQSQueueMock and SQSConnectionMock classes originally come from http://github.com/pcsforeducation/sqs-mock-python. They have been patched slightly. 
""" from __future__ import absolute_import, unicode_literals import pytest import random import string from case import Mock, skip from kombu import messaging from kombu import Connection, Exchange, Queue from kombu.five import Empty from kombu.transport import SQS SQS_Channel_sqs = SQS.Channel.sqs class SQSMessageMock(object): def __init__(self): """ Imitate the SQS Message from boto3. """ self.body = "" self.receipt_handle = "receipt_handle_xyz" class QueueMock(object): """ Hold information about a queue. """ def __init__(self, url): self.url = url self.attributes = {'ApproximateNumberOfMessages': '0'} self.messages = [] def __repr__(self): return 'QueueMock: {} {} messages'.format(self.url, len(self.messages)) class SQSClientMock(object): def __init__(self): """ Imitate the SQS Client from boto3. """ self._receive_messages_calls = 0 # _queues doesn't exist on the real client, here for testing. self._queues = {} for n in range(1): name = 'q_{}'.format(n) url = 'sqs://q_{}'.format(n) self.create_queue(QueueName=name) url = self.create_queue(QueueName='unittest_queue')['QueueUrl'] self.send_message(QueueUrl=url, MessageBody='hello') def _get_q(self, url): """ Helper method to quickly get a queue. """ for q in self._queues.values(): if q.url == url: return q raise Exception("Queue url {} not found".format(url)) def create_queue(self, QueueName=None, Attributes=None): q = self._queues[QueueName] = QueueMock('sqs://' + QueueName) return {'QueueUrl': q.url} def list_queues(self, QueueNamePrefix=None): """ Return a list of queue urls """ urls = (val.url for key, val in self._queues.items() if key.startswith(QueueNamePrefix)) return {'QueueUrls': urls} def get_queue_url(self, QueueName=None): return self._queues[QueueName] def send_message(self, QueueUrl=None, MessageBody=None): for q in self._queues.values(): if q.url == QueueUrl: handle = ''.join(random.choice(string.ascii_lowercase) for x in range(10)) q.messages.append({'Body': MessageBody, 'ReceiptHandle': handle}) break def receive_message(self, QueueUrl=None, MaxNumberOfMessages=1, WaitTimeSeconds=10): self._receive_messages_calls += 1 for q in self._queues.values(): if q.url == QueueUrl: msgs = q.messages[:MaxNumberOfMessages] q.messages = q.messages[MaxNumberOfMessages:] return {'Messages': msgs} if msgs else {} def get_queue_attributes(self, QueueUrl=None, AttributeNames=None): if 'ApproximateNumberOfMessages' in AttributeNames: count = len(self._get_q(QueueUrl).messages) return {'Attributes': {'ApproximateNumberOfMessages': count}} def purge_queue(self, QueueUrl=None): for q in self._queues.values(): if q.url == QueueUrl: q.messages = [] @skip.unless_module('boto3') class test_Channel: def handleMessageCallback(self, message): self.callback_message = message def setup(self): """Mock the back-end SQS classes""" # Sanity check... if SQS is None, then it did not import and we # cannot execute our tests. SQS.Channel._queue_cache.clear() # Common variables used in the unit tests self.queue_name = 'unittest' # Mock the sqs() method that returns an SQSConnection object and # instead return an SQSConnectionMock() object. 
self.sqs_conn_mock = SQSClientMock() def mock_sqs(): return self.sqs_conn_mock SQS.Channel.sqs = mock_sqs() # Set up a task exchange for passing tasks through the queue self.exchange = Exchange('test_SQS', type='direct') self.queue = Queue(self.queue_name, self.exchange, self.queue_name) # Mock up a test SQS Queue with the QueueMock class (and always # make sure its a clean empty queue) self.sqs_queue_mock = QueueMock('sqs://' + self.queue_name) # Now, create our Connection object with the SQS Transport and store # the connection/channel objects as references for use in these tests. self.connection = Connection(transport=SQS.Transport) self.channel = self.connection.channel() self.queue(self.channel).declare() self.producer = messaging.Producer(self.channel, self.exchange, routing_key=self.queue_name) # Lastly, make sure that we're set up to 'consume' this queue. self.channel.basic_consume(self.queue_name, no_ack=False, callback=self.handleMessageCallback, consumer_tag='unittest') def teardown(self): # Removes QoS reserved messages so we don't restore msgs on shutdown. try: qos = self.channel._qos except AttributeError: pass else: if qos: qos._dirty.clear() qos._delivered.clear() def test_init(self): """kombu.SQS.Channel instantiates correctly with mocked queues""" assert self.queue_name in self.channel._queue_cache def test_endpoint_url(self): url = 'sqs://@localhost:5493' self.connection = Connection(hostname=url, transport=SQS.Transport) self.channel = self.connection.channel() self.channel._sqs = None expected_endpoint_url = 'http://localhost:5493' assert self.channel.endpoint_url == expected_endpoint_url boto3_sqs = SQS_Channel_sqs.__get__(self.channel, SQS.Channel) assert boto3_sqs._endpoint.host == expected_endpoint_url def test_none_hostname_persists(self): conn = Connection(hostname=None, transport=SQS.Transport) assert conn.hostname == conn.clone().hostname def test_new_queue(self): queue_name = 'new_unittest_queue' self.channel._new_queue(queue_name) assert queue_name in self.sqs_conn_mock._queues.keys() # For cleanup purposes, delete the queue and the queue file self.channel._delete(queue_name) def test_dont_create_duplicate_new_queue(self): # All queue names start with "q", except "unittest_queue". # which is definitely out of cache when get_all_queues returns the # first 1000 queues sorted by name. queue_name = 'unittest_queue' # This should not create a new queue. self.channel._new_queue(queue_name) assert queue_name in self.sqs_conn_mock._queues.keys() queue = self.sqs_conn_mock._queues[queue_name] # The queue originally had 1 message in it. 
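# (SQSClientMock.__init__ seeds 'unittest_queue' with a single 'hello'
# body, so its survival here shows _new_queue() reused the cached queue
# rather than creating a fresh, empty one.)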
assert 1 == len(queue.messages) assert 'hello' == queue.messages[0]['Body'] def test_delete(self): queue_name = 'new_unittest_queue' self.channel._new_queue(queue_name) self.channel._delete(queue_name) assert queue_name not in self.channel._queue_cache def test_get_from_sqs(self): # Test getting a single message message = 'my test message' self.producer.publish(message) result = self.channel._get(self.queue_name) assert 'body' in result.keys() # Now test getting many messages for i in range(3): message = 'message: {0}'.format(i) self.producer.publish(message) self.channel._get_bulk(self.queue_name, max_if_unlimited=3) assert len(self.sqs_conn_mock._queues[self.queue_name].messages) == 0 def test_get_with_empty_list(self): with pytest.raises(Empty): self.channel._get(self.queue_name) def test_get_bulk_raises_empty(self): with pytest.raises(Empty): self.channel._get_bulk(self.queue_name) def test_messages_to_python(self): from kombu.async.aws.sqs.message import Message kombu_message_count = 3 json_message_count = 3 # Create several test messages and publish them for i in range(kombu_message_count): message = 'message: %s' % i self.producer.publish(message) # json formatted message NOT created by kombu for i in range(json_message_count): message = {'foo': 'bar'} self.channel._put(self.producer.routing_key, message) q_url = self.channel._new_queue(self.queue_name) # Get the messages now kombu_messages = [] for m in self.sqs_conn_mock.receive_message( QueueUrl=q_url, MaxNumberOfMessages=kombu_message_count)['Messages']: m['Body'] = Message(body=m['Body']).decode() kombu_messages.append(m) json_messages = [] for m in self.sqs_conn_mock.receive_message( QueueUrl=q_url, MaxNumberOfMessages=json_message_count)['Messages']: m['Body'] = Message(body=m['Body']).decode() json_messages.append(m) # Now convert them to payloads kombu_payloads = self.channel._messages_to_python( kombu_messages, self.queue_name, ) json_payloads = self.channel._messages_to_python( json_messages, self.queue_name, ) # We got the same number of payloads back, right? assert len(kombu_payloads) == kombu_message_count assert len(json_payloads) == json_message_count # Make sure they're payload-style objects for p in kombu_payloads: assert 'properties' in p for p in json_payloads: assert 'properties' in p def test_put_and_get(self): message = 'my test message' self.producer.publish(message) results = self.queue(self.channel).get().payload assert message == results def test_put_and_get_bulk(self): # With QoS.prefetch_count = 0 message = 'my test message' self.producer.publish(message) self.channel.connection._deliver = Mock(name='_deliver') self.channel._get_bulk(self.queue_name) self.channel.connection._deliver.assert_called_once() def test_puts_and_get_bulk(self): # Generate 8 messages message_count = 8 # Set the prefetch_count to 5 self.channel.qos.prefetch_count = 5 # Now, generate all the messages for i in range(message_count): message = 'message: %s' % i self.producer.publish(message) # Count how many messages are retrieved the first time. Should # be 5 (message_count). self.channel.connection._deliver = Mock(name='_deliver') self.channel._get_bulk(self.queue_name) assert self.channel.connection._deliver.call_count == 5 for i in range(5): self.channel.qos.append(Mock(name='message{0}'.format(i)), i) # Now, do the get again, the number of messages returned should be 1. 
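# (A hedged reading: the five tags appended above fill the prefetch
# window of 5, so the transport is assumed to fall back to fetching one
# message at a time -- the assertion below pins that to a single
# delivery.)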
self.channel.connection._deliver.reset_mock() self.channel._get_bulk(self.queue_name) self.channel.connection._deliver.assert_called_once() def test_drain_events_with_empty_list(self): def mock_can_consume(): return False self.channel.qos.can_consume = mock_can_consume with pytest.raises(Empty): self.channel.drain_events() def test_drain_events_with_prefetch_5(self): # Generate 20 messages message_count = 20 prefetch_count = 5 current_delivery_tag = [1] # Set the prefetch_count to 5 self.channel.qos.prefetch_count = prefetch_count self.channel.connection._deliver = Mock(name='_deliver') def on_message_delivered(message, queue): current_delivery_tag[0] += 1 self.channel.qos.append(message, current_delivery_tag[0]) self.channel.connection._deliver.side_effect = on_message_delivered # Now, generate all the messages for i in range(message_count): self.producer.publish('message: %s' % i) # Now drain all the events for i in range(1000): try: self.channel.drain_events(timeout=0) except Empty: break else: assert False, 'disabled infinite loop' self.channel.qos._flush() assert len(self.channel.qos._delivered) == prefetch_count assert self.channel.connection._deliver.call_count == prefetch_count def test_drain_events_with_prefetch_none(self): # Generate 20 messages message_count = 20 expected_receive_messages_count = 3 current_delivery_tag = [1] # Set the prefetch_count to None self.channel.qos.prefetch_count = None self.channel.connection._deliver = Mock(name='_deliver') def on_message_delivered(message, queue): current_delivery_tag[0] += 1 self.channel.qos.append(message, current_delivery_tag[0]) self.channel.connection._deliver.side_effect = on_message_delivered # Now, generate all the messages for i in range(message_count): self.producer.publish('message: %s' % i) # Now drain all the events for i in range(1000): try: self.channel.drain_events(timeout=0) except Empty: break else: assert False, 'disabled infinite loop' assert self.channel.connection._deliver.call_count == message_count # How many times was the SQSConnectionMock receive_message method # called? 
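# (Assumed arithmetic: 20 messages fetched in batches of at most 10 per
# call gives two full batches plus one final empty poll that raises
# Empty -- three calls in total.)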
assert (expected_receive_messages_count == self.sqs_conn_mock._receive_messages_calls) kombu-4.1.0/t/unit/transport/__init__.py0000644000175000017500000000000013130603207020127 0ustar omeromer00000000000000kombu-4.1.0/t/unit/transport/test_transport.py0000644000175000017500000000200313130603207021470 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from case import Mock, patch from kombu import transport class test_supports_librabbitmq: def test_eventlet(self): with patch('kombu.transport._detect_environment') as de: de.return_value = 'eventlet' assert not transport.supports_librabbitmq() class test_transport: def test_resolve_transport(self): from kombu.transport.memory import Transport assert transport.resolve_transport( 'kombu.transport.memory:Transport') is Transport assert transport.resolve_transport(Transport) is Transport def test_resolve_transport_alias_callable(self): m = transport.TRANSPORT_ALIASES['George'] = Mock(name='lazyalias') try: transport.resolve_transport('George') m.assert_called_with() finally: transport.TRANSPORT_ALIASES.pop('George') def test_resolve_transport_alias(self): assert transport.resolve_transport('pyamqp') kombu-4.1.0/t/unit/transport/test_pyamqp.py0000644000175000017500000001205113130603207020747 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import sys from itertools import count from case import Mock, mock, patch from kombu import Connection from kombu.five import nextfun from kombu.transport import pyamqp def test_amqps_connection(): conn = Connection('amqps://') assert conn.transport # evaluate transport, don't connect assert conn.ssl class MockConnection(dict): def __setattr__(self, key, value): self[key] = value def connect(self): pass class test_Channel: def setup(self): class Channel(pyamqp.Channel): wait_returns = [] def _x_open(self, *args, **kwargs): pass def wait(self, *args, **kwargs): return self.wait_returns def _send_method(self, *args, **kwargs): pass self.conn = Mock() self.conn._get_free_channel_id.side_effect = nextfun(count(0)) self.conn.channels = {} self.channel = Channel(self.conn, 0) def test_init(self): assert not self.channel.no_ack_consumers def test_prepare_message(self): assert self.channel.prepare_message( 'foobar', 10, 'application/data', 'utf-8', properties={}, ) def test_message_to_python(self): message = Mock() message.headers = {} message.properties = {} assert self.channel.message_to_python(message) def test_close_resolves_connection_cycle(self): assert self.channel.connection is not None self.channel.close() assert self.channel.connection is None def test_basic_consume_registers_ack_status(self): self.channel.wait_returns = 'my-consumer-tag' self.channel.basic_consume('foo', no_ack=True) assert 'my-consumer-tag' in self.channel.no_ack_consumers self.channel.wait_returns = 'other-consumer-tag' self.channel.basic_consume('bar', no_ack=False) assert 'other-consumer-tag' not in self.channel.no_ack_consumers self.channel.basic_cancel('my-consumer-tag') assert 'my-consumer-tag' not in self.channel.no_ack_consumers class test_Transport: def setup(self): self.connection = Connection('pyamqp://') self.transport = self.connection.transport def test_create_channel(self): connection = Mock() self.transport.create_channel(connection) connection.channel.assert_called_with() def test_driver_version(self): assert self.transport.driver_version() def test_drain_events(self): connection = Mock() self.transport.drain_events(connection, timeout=10.0) 
connection.drain_events.assert_called_with(timeout=10.0) def test_dnspython_localhost_resolve_bug(self): class Conn(object): def __init__(self, **kwargs): vars(self).update(kwargs) def connect(self): pass self.transport.Connection = Conn self.transport.client.hostname = 'localhost' conn1 = self.transport.establish_connection() assert conn1.host == '127.0.0.1:5672' self.transport.client.hostname = 'example.com' conn2 = self.transport.establish_connection() assert conn2.host == 'example.com:5672' def test_close_connection(self): connection = Mock() connection.client = Mock() self.transport.close_connection(connection) assert connection.client is None connection.close.assert_called_with() @mock.mask_modules('ssl') def test_import_no_ssl(self): pm = sys.modules.pop('amqp.connection') try: from amqp.connection import SSLError assert SSLError.__module__ == 'amqp.connection' finally: if pm is not None: sys.modules['amqp.connection'] = pm class test_pyamqp: def test_default_port(self): class Transport(pyamqp.Transport): Connection = MockConnection c = Connection(port=None, transport=Transport).connect() assert c['host'] == '127.0.0.1:%s' % (Transport.default_port,) def test_custom_port(self): class Transport(pyamqp.Transport): Connection = MockConnection c = Connection(port=1337, transport=Transport).connect() assert c['host'] == '127.0.0.1:1337' def test_register_with_event_loop(self): t = pyamqp.Transport(Mock()) conn = Mock(name='conn') loop = Mock(name='loop') t.register_with_event_loop(conn, loop) loop.add_reader.assert_called_with( conn.sock, t.on_readable, conn, loop, ) def test_heartbeat_check(self): t = pyamqp.Transport(Mock()) conn = Mock() t.heartbeat_check(conn, rate=4.331) conn.heartbeat_tick.assert_called_with(rate=4.331) def test_get_manager(self): with patch('kombu.transport.pyamqp.get_manager') as get_manager: t = pyamqp.Transport(Mock()) t.get_manager(1, kw=2) get_manager.assert_called_with(t.client, 1, kw=2) kombu-4.1.0/t/unit/transport/test_redis.py0000644000175000017500000012357113130603207020560 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket import types from collections import defaultdict from itertools import count from case import ANY, ContextMock, Mock, call, mock, skip, patch from kombu import Connection, Exchange, Queue, Consumer, Producer from kombu.exceptions import InconsistencyError, VersionMismatch from kombu.five import Empty, Queue as _Queue, bytes_if_py2 from kombu.transport import virtual from kombu.utils import eventio # patch poll from kombu.utils.json import dumps class _poll(eventio._select): def register(self, fd, flags): if flags & eventio.READ: self._rfd.add(fd) def poll(self, timeout): events = [] for fd in self._rfd: if fd.data: events.append((fd.fileno(), eventio.READ)) return events eventio.poll = _poll # must import after poller patch, pep8 complains from kombu.transport import redis # noqa class ResponseError(Exception): pass class Client(object): queues = {} sets = defaultdict(set) hashes = defaultdict(dict) shard_hint = None def __init__(self, db=None, port=None, connection_pool=None, **kwargs): self._called = [] self._connection = None self.bgsave_raises_ResponseError = False self.connection = self._sconnection(self) def bgsave(self): self._called.append('BGSAVE') if self.bgsave_raises_ResponseError: raise ResponseError() def delete(self, key): self.queues.pop(key, None) def exists(self, key): return key in self.queues or key in self.sets def hset(self, key, k, v): 
self.hashes[key][k] = v def hget(self, key, k): return self.hashes[key].get(k) def hdel(self, key, k): self.hashes[key].pop(k, None) def sadd(self, key, member, *args): self.sets[key].add(member) def zadd(self, key, score1, member1, *args): self.sets[key].add(member1) def smembers(self, key): return self.sets.get(key, set()) def ping(self, *args, **kwargs): return True def srem(self, key, *args): self.sets.pop(key, None) zrem = srem def llen(self, key): try: return self.queues[key].qsize() except KeyError: return 0 def lpush(self, key, value): self.queues[key].put_nowait(value) def parse_response(self, connection, type, **options): cmd, queues = self.connection._sock.data.pop() queues = list(queues) assert cmd == type self.connection._sock.data = [] if type == 'BRPOP': timeout = queues.pop() item = self.brpop(queues, timeout) if item: return item raise Empty() def brpop(self, keys, timeout=None): for key in keys: try: item = self.queues[key].get_nowait() except Empty: pass else: return key, item def rpop(self, key): try: return self.queues[key].get_nowait() except (KeyError, Empty): pass def __contains__(self, k): return k in self._called def pipeline(self): return Pipeline(self) def encode(self, value): return str(value) def _new_queue(self, key): self.queues[key] = _Queue() class _sconnection(object): disconnected = False class _socket(object): blocking = True filenos = count(30) def __init__(self, *args): self._fileno = next(self.filenos) self.data = [] def fileno(self): return self._fileno def setblocking(self, blocking): self.blocking = blocking def __init__(self, client): self.client = client self._sock = self._socket() def disconnect(self): self.disconnected = True def send_command(self, cmd, *args): self._sock.data.append((cmd, args)) def info(self): return {'foo': 1} def pubsub(self, *args, **kwargs): connection = self.connection class ConnectionPool(object): def get_connection(self, *args, **kwargs): return connection self.connection_pool = ConnectionPool() return self class Pipeline(object): def __init__(self, client): self.client = client self.stack = [] def __enter__(self): return self def __exit__(self, *exc_info): pass def __getattr__(self, key): if key not in self.__dict__: def _add(*args, **kwargs): self.stack.append((getattr(self.client, key), args, kwargs)) return self return _add return self.__dict__[key] def execute(self): stack = list(self.stack) self.stack[:] = [] return [fun(*args, **kwargs) for fun, args, kwargs in stack] class Channel(redis.Channel): def _get_client(self): return Client def _get_pool(self, async=False): return Mock() def _get_response_error(self): return ResponseError def _new_queue(self, queue, **kwargs): for pri in self.priority_steps: self.client._new_queue(self._q_for_pri(queue, pri)) def pipeline(self): return Pipeline(Client()) class Transport(redis.Transport): Channel = Channel def _get_errors(self): return ((KeyError,), (IndexError,)) @skip.unless_module('redis') class test_Channel: def setup(self): self.connection = self.create_connection() self.channel = self.connection.default_channel def create_connection(self, **kwargs): kwargs.setdefault('transport_options', {'fanout_patterns': True}) return Connection(transport=Transport, **kwargs) def _get_one_delivery_tag(self, n='test_uniq_tag'): with self.create_connection() as conn1: chan = conn1.default_channel chan.exchange_declare(n) chan.queue_declare(n) chan.queue_bind(n, n, n) msg = chan.prepare_message('quick brown fox') chan.basic_publish(msg, n, n) payload = chan._get(n) assert payload 
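# (The tag extracted below is expected to be a uuid4 string -- 36
# characters, not castable to int -- exactly what
# test_delivery_tag_is_uuid asserts.)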
pymsg = chan.message_to_python(payload) return pymsg.delivery_tag def test_delivery_tag_is_uuid(self): seen = set() for i in range(100): tag = self._get_one_delivery_tag() assert tag not in seen seen.add(tag) with pytest.raises(ValueError): int(tag) assert len(tag) == 36 def test_disable_ack_emulation(self): conn = Connection(transport=Transport, transport_options={ 'ack_emulation': False, }) chan = conn.channel() assert not chan.ack_emulation assert chan.QoS == virtual.QoS def test_redis_ping_raises(self): pool = Mock(name='pool') pool_at_init = [pool] client = Mock(name='client') class XChannel(Channel): def __init__(self, *args, **kwargs): self._pool = pool_at_init[0] super(XChannel, self).__init__(*args, **kwargs) def _get_client(self): return lambda *_, **__: client class XTransport(Transport): Channel = XChannel conn = Connection(transport=XTransport) client.ping.side_effect = RuntimeError() with pytest.raises(RuntimeError): conn.channel() pool.disconnect.assert_called_with() pool.disconnect.reset_mock() pool_at_init = [None] with pytest.raises(RuntimeError): conn.channel() pool.disconnect.assert_not_called() def test_after_fork(self): self.channel._pool = None self.channel._after_fork() pool = self.channel._pool = Mock(name='pool') self.channel._after_fork() pool.disconnect.assert_called_with() def test_next_delivery_tag(self): assert (self.channel._next_delivery_tag() != self.channel._next_delivery_tag()) def test_do_restore_message(self): client = Mock(name='client') pl1 = {'body': 'BODY'} spl1 = dumps(pl1) lookup = self.channel._lookup = Mock(name='_lookup') lookup.return_value = {'george', 'elaine'} self.channel._do_restore_message( pl1, 'ex', 'rkey', client, ) client.rpush.assert_has_calls([ call('george', spl1), call('elaine', spl1), ], any_order=True) client = Mock(name='client') pl2 = {'body': 'BODY2', 'headers': {'x-funny': 1}} headers_after = dict(pl2['headers'], redelivered=True) spl2 = dumps(dict(pl2, headers=headers_after)) self.channel._do_restore_message( pl2, 'ex', 'rkey', client, ) client.rpush.assert_any_call('george', spl2) client.rpush.assert_any_call('elaine', spl2) client.rpush.side_effect = KeyError() with patch('kombu.transport.redis.crit') as crit: self.channel._do_restore_message( pl2, 'ex', 'rkey', client, ) crit.assert_called() def test_restore(self): message = Mock(name='message') with patch('kombu.transport.redis.loads') as loads: loads.return_value = 'M', 'EX', 'RK' client = self.channel._create_client = Mock(name='client') client = client() client.pipeline = ContextMock() restore = self.channel._do_restore_message = Mock( name='_do_restore_message', ) pipe = client.pipeline.return_value pipe_hget = Mock(name='pipe.hget') pipe.hget.return_value = pipe_hget pipe_hget_hdel = Mock(name='pipe.hget.hdel') pipe_hget.hdel.return_value = pipe_hget_hdel result = Mock(name='result') pipe_hget_hdel.execute.return_value = None, None self.channel._restore(message) client.pipeline.assert_called_with() unacked_key = self.channel.unacked_key loads.assert_not_called() tag = message.delivery_tag pipe.hget.assert_called_with(unacked_key, tag) pipe_hget.hdel.assert_called_with(unacked_key, tag) pipe_hget_hdel.execute.assert_called_with() pipe_hget_hdel.execute.return_value = result, None self.channel._restore(message) loads.assert_called_with(result) restore.assert_called_with('M', 'EX', 'RK', client, False) def test_qos_restore_visible(self): client = self.channel._create_client = Mock(name='client') client = client() def pipe(*args, **kwargs): return Pipeline(client) 
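# Each conn_or_acquire() inside restore_visible() asks for a pipeline, so
# this local helper hands out a fresh Pipeline bound to the mocked client;
# the canned zrevrangebyscore() result below stands in for the sorted set
# the transport uses to index unacked messages by visibility timestamp.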
client.pipeline = pipe client.zrevrangebyscore.return_value = [ (1, 10), (2, 20), (3, 30), ] qos = redis.QoS(self.channel) restore = qos.restore_by_tag = Mock(name='restore_by_tag') qos._vrestore_count = 1 qos.restore_visible() client.zrevrangebyscore.assert_not_called() assert qos._vrestore_count == 2 qos._vrestore_count = 0 qos.restore_visible() restore.assert_has_calls([ call(1, client), call(2, client), call(3, client), ]) assert qos._vrestore_count == 1 qos._vrestore_count = 0 restore.reset_mock() client.zrevrangebyscore.return_value = [] qos.restore_visible() restore.assert_not_called() assert qos._vrestore_count == 1 qos._vrestore_count = 0 client.setnx.side_effect = redis.MutexHeld() qos.restore_visible() def test_basic_consume_when_fanout_queue(self): self.channel.exchange_declare(exchange='txconfan', type='fanout') self.channel.queue_declare(queue='txconfanq') self.channel.queue_bind(queue='txconfanq', exchange='txconfan') assert 'txconfanq' in self.channel._fanout_queues self.channel.basic_consume('txconfanq', False, None, 1) assert 'txconfanq' in self.channel.active_fanout_queues assert self.channel._fanout_to_queue.get('txconfan') == 'txconfanq' def test_basic_cancel_unknown_delivery_tag(self): assert self.channel.basic_cancel('txaseqwewq') is None def test_subscribe_no_queues(self): self.channel.subclient = Mock() self.channel.active_fanout_queues.clear() self.channel._subscribe() self.channel.subclient.subscribe.assert_not_called() def test_subscribe(self): self.channel.subclient = Mock() self.channel.active_fanout_queues.add('a') self.channel.active_fanout_queues.add('b') self.channel._fanout_queues.update(a=('a', ''), b=('b', '')) self.channel._subscribe() self.channel.subclient.psubscribe.assert_called() s_args, _ = self.channel.subclient.psubscribe.call_args assert sorted(s_args[0]) == ['/{db}.a', '/{db}.b'] self.channel.subclient.connection._sock = None self.channel._subscribe() self.channel.subclient.connection.connect.assert_called_with() def test_handle_unsubscribe_message(self): s = self.channel.subclient s.subscribed = True self.channel._handle_message(s, ['unsubscribe', 'a', 0]) assert not s.subscribed def test_handle_pmessage_message(self): res = self.channel._handle_message( self.channel.subclient, ['pmessage', 'pattern', 'channel', 'data'], ) assert res == { 'type': 'pmessage', 'pattern': 'pattern', 'channel': 'channel', 'data': 'data', } def test_handle_message(self): res = self.channel._handle_message( self.channel.subclient, ['type', 'channel', 'data'], ) assert res == { 'type': 'type', 'pattern': None, 'channel': 'channel', 'data': 'data', } def test_brpop_start_but_no_queues(self): assert self.channel._brpop_start() is None def test_receive(self): s = self.channel.subclient = Mock() self.channel._fanout_to_queue['a'] = 'b' self.channel.connection._deliver = Mock(name='_deliver') message = { 'body': 'hello', 'properties': { 'delivery_tag': 1, 'delivery_info': {'exchange': 'E', 'routing_key': 'R'}, }, } s.parse_response.return_value = ['message', 'a', dumps(message)] self.channel._receive_one(self.channel.subclient) self.channel.connection._deliver.assert_called_once_with( message, 'b', ) def test_receive_raises_for_connection_error(self): self.channel._in_listen = True s = self.channel.subclient = Mock() s.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._receive_one(self.channel.subclient) assert not self.channel._in_listen def test_receive_empty(self): s = self.channel.subclient = Mock() s.parse_response.return_value 
= None assert self.channel._receive_one(self.channel.subclient) is None def test_receive_different_message_Type(self): s = self.channel.subclient = Mock() s.parse_response.return_value = ['message', '/foo/', 0, 'data'] assert self.channel._receive_one(self.channel.subclient) is None def test_brpop_read_raises(self): c = self.channel.client = Mock() c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._brpop_read() c.connection.disconnect.assert_called_with() def test_brpop_read_gives_None(self): c = self.channel.client = Mock() c.parse_response.return_value = None with pytest.raises(redis.Empty): self.channel._brpop_read() def test_poll_error(self): c = self.channel.client = Mock() c.parse_response = Mock() self.channel._poll_error('BRPOP') c.parse_response.assert_called_with(c.connection, 'BRPOP') c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._poll_error('BRPOP') def test_poll_error_on_type_LISTEN(self): c = self.channel.subclient = Mock() c.parse_response = Mock() self.channel._poll_error('LISTEN') c.parse_response.assert_called_with() c.parse_response.side_effect = KeyError('foo') with pytest.raises(KeyError): self.channel._poll_error('LISTEN') def test_put_fanout(self): self.channel._in_poll = False c = self.channel._create_client = Mock() body = {'hello': 'world'} self.channel._put_fanout('exchange', body, '') c().publish.assert_called_with('/{db}.exchange', dumps(body)) def test_put_priority(self): client = self.channel._create_client = Mock(name='client') msg1 = {'properties': {'priority': 3}} self.channel._put('george', msg1) client().lpush.assert_called_with( self.channel._q_for_pri('george', 3), dumps(msg1), ) msg2 = {'properties': {'priority': 313}} self.channel._put('george', msg2) client().lpush.assert_called_with( self.channel._q_for_pri('george', 9), dumps(msg2), ) msg3 = {'properties': {}} self.channel._put('george', msg3) client().lpush.assert_called_with( self.channel._q_for_pri('george', 0), dumps(msg3), ) def test_delete(self): x = self.channel x._create_client = Mock() x._create_client.return_value = x.client delete = x.client.delete = Mock() srem = x.client.srem = Mock() x._delete('queue', 'exchange', 'routing_key', None) delete.assert_has_calls([ call(x._q_for_pri('queue', pri)) for pri in redis.PRIORITY_STEPS ]) srem.assert_called_with(x.keyprefix_queue % ('exchange',), x.sep.join(['routing_key', '', 'queue'])) def test_has_queue(self): self.channel._create_client = Mock() self.channel._create_client.return_value = self.channel.client exists = self.channel.client.exists = Mock() exists.return_value = True assert self.channel._has_queue('foo') exists.assert_has_calls([ call(self.channel._q_for_pri('foo', pri)) for pri in redis.PRIORITY_STEPS ]) exists.return_value = False assert not self.channel._has_queue('foo') def test_close_when_closed(self): self.channel.closed = True self.channel.close() def test_close_deletes_autodelete_fanout_queues(self): self.channel._fanout_queues = {'foo': ('foo', ''), 'bar': ('bar', '')} self.channel.auto_delete_queues = ['foo'] self.channel.queue_delete = Mock(name='queue_delete') client = self.channel.client self.channel.close() self.channel.queue_delete.assert_has_calls([ call('foo', client=client), ]) def test_close_client_close_raises(self): c = self.channel.client = Mock() connection = c.connection connection.disconnect.side_effect = self.channel.ResponseError() self.channel.close() connection.disconnect.assert_called_with() def 
test_invalid_database_raises_ValueError(self): with pytest.raises(ValueError): self.channel.connection.client.virtual_host = 'dwqeq' self.channel._connparams() def test_connparams_allows_slash_in_db(self): self.channel.connection.client.virtual_host = '/123' assert self.channel._connparams()['db'] == 123 def test_connparams_db_can_be_int(self): self.channel.connection.client.virtual_host = 124 assert self.channel._connparams()['db'] == 124 def test_new_queue_with_auto_delete(self): redis.Channel._new_queue(self.channel, 'george', auto_delete=False) assert 'george' not in self.channel.auto_delete_queues redis.Channel._new_queue(self.channel, 'elaine', auto_delete=True) assert 'elaine' in self.channel.auto_delete_queues def test_connparams_regular_hostname(self): self.channel.connection.client.hostname = 'george.vandelay.com' assert self.channel._connparams()['host'] == 'george.vandelay.com' def test_connparams_password_for_unix_socket(self): self.channel.connection.client.hostname = \ 'socket://:foo@/var/run/redis.sock' connection_parameters = self.channel._connparams() password = connection_parameters['password'] path = connection_parameters['path'] assert (password, path) == ('foo', '/var/run/redis.sock') self.channel.connection.client.hostname = \ 'socket://@/var/run/redis.sock' connection_parameters = self.channel._connparams() password = connection_parameters['password'] path = connection_parameters['path'] assert (password, path) == (None, '/var/run/redis.sock') def test_rotate_cycle_ValueError(self): cycle = self.channel._queue_cycle cycle.update(['kramer', 'jerry']) cycle.rotate('kramer') assert cycle.items == ['jerry', 'kramer'] cycle.rotate('elaine') def test_get_client(self): import redis as R KombuRedis = redis.Channel._get_client(self.channel) assert KombuRedis Rv = getattr(R, 'VERSION', None) try: R.VERSION = (2, 4, 0) with pytest.raises(VersionMismatch): redis.Channel._get_client(self.channel) finally: if Rv is not None: R.VERSION = Rv def test_get_response_error(self): from redis.exceptions import ResponseError assert redis.Channel._get_response_error(self.channel) is ResponseError def test_avail_client(self): self.channel._pool = Mock() cc = self.channel._create_client = Mock() with self.channel.conn_or_acquire(): pass cc.assert_called_with() def test_register_with_event_loop(self): transport = self.connection.transport transport.cycle = Mock(name='cycle') transport.cycle.fds = {12: 'LISTEN', 13: 'BRPOP'} conn = Mock(name='conn') loop = Mock(name='loop') redis.Transport.register_with_event_loop(transport, conn, loop) transport.cycle.on_poll_init.assert_called_with(loop.poller) loop.call_repeatedly.assert_called_with( 10, transport.cycle.maybe_restore_messages, ) loop.on_tick.add.assert_called() on_poll_start = loop.on_tick.add.call_args[0][0] on_poll_start() transport.cycle.on_poll_start.assert_called_with() loop.add_reader.assert_has_calls([ call(12, transport.on_readable, 12), call(13, transport.on_readable, 13), ]) def test_transport_on_readable(self): transport = self.connection.transport cycle = transport.cycle = Mock(name='cycle') cycle.on_readable.return_value = None redis.Transport.on_readable(transport, 13) cycle.on_readable.assert_called_with(13) def test_transport_get_errors(self): assert redis.Transport._get_errors(self.connection.transport) def test_transport_driver_version(self): assert redis.Transport.driver_version(self.connection.transport) def test_transport_get_errors_when_InvalidData_used(self): from redis import exceptions class ID(Exception): pass
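# Older redis-py releases exposed InvalidData where newer ones expose
# DataError; the block below temporarily swaps the module attributes to
# check that _get_errors() registers whichever exception class exists,
# then restores the real values in the finally clause.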
DataError = getattr(exceptions, 'DataError', None) InvalidData = getattr(exceptions, 'InvalidData', None) exceptions.InvalidData = ID exceptions.DataError = None try: errors = redis.Transport._get_errors(self.connection.transport) assert errors assert ID in errors[1] finally: if DataError is not None: exceptions.DataError = DataError if InvalidData is not None: exceptions.InvalidData = InvalidData def test_empty_queues_key(self): channel = self.channel channel._in_poll = False key = channel.keyprefix_queue % 'celery' # Everything is fine, there is a list of queues. channel.client.sadd(key, 'celery\x06\x16\x06\x16celery') assert channel.get_table('celery') == [ ('celery', '', 'celery'), ] # ... then for some reason, the _kombu.binding.celery key gets lost channel.client.srem(key) # which raises a channel error so that the consumer/publisher # can recover by redeclaring the required entities. with pytest.raises(InconsistencyError): self.channel.get_table('celery') def test_socket_connection(self): with patch('kombu.transport.redis.Channel._create_client'): with Connection('redis+socket:///tmp/redis.sock') as conn: connparams = conn.default_channel._connparams() assert issubclass( connparams['connection_class'], redis.redis.UnixDomainSocketConnection, ) assert connparams['path'] == '/tmp/redis.sock' def test_ssl_argument__dict(self): with patch('kombu.transport.redis.Channel._create_client'): # Expected format for redis-py's SSLConnection class ssl_params = { 'ssl_cert_reqs': 2, 'ssl_ca_certs': '/foo/ca.pem', 'ssl_certfile': '/foo/cert.crt', 'ssl_keyfile': '/foo/pkey.key' } with Connection('redis://', ssl=ssl_params) as conn: params = conn.default_channel._connparams() assert params['ssl_cert_reqs'] == ssl_params['ssl_cert_reqs'] assert params['ssl_ca_certs'] == ssl_params['ssl_ca_certs'] assert params['ssl_certfile'] == ssl_params['ssl_certfile'] assert params['ssl_keyfile'] == ssl_params['ssl_keyfile'] assert params.get('ssl') is None def test_ssl_connection(self): with patch('kombu.transport.redis.Channel._create_client'): with Connection('redis://', ssl={'ssl_cert_reqs': 2}) as conn: connparams = conn.default_channel._connparams() assert issubclass( connparams['connection_class'], redis.redis.SSLConnection, ) @skip.unless_module('redis') class test_Redis: def setup(self): self.connection = Connection(transport=Transport) self.exchange = Exchange('test_Redis', type='direct') self.queue = Queue('test_Redis', self.exchange, 'test_Redis') def teardown(self): self.connection.close() def test_publish__get(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() producer.publish({'hello': 'world'}) assert self.queue(channel).get().payload == {'hello': 'world'} assert self.queue(channel).get() is None assert self.queue(channel).get() is None assert self.queue(channel).get() is None def test_publish__consume(self): connection = Connection(transport=Transport) channel = connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') consumer = Consumer(channel, queues=[self.queue]) producer.publish({'hello2': 'world2'}) _received = [] def callback(message_data, message): _received.append(message_data) message.ack() consumer.register_callback(callback) consumer.consume() assert channel in channel.connection.cycle._channels try: connection.drain_events(timeout=1) assert _received with pytest.raises(socket.timeout): connection.drain_events(timeout=0.01) finally: channel.close() def 
test_purge(self): channel = self.connection.channel() producer = Producer(channel, self.exchange, routing_key='test_Redis') self.queue(channel).declare() for i in range(10): producer.publish({'hello': 'world-%s' % (i,)}) assert channel._size('test_Redis') == 10 assert self.queue(channel).purge() == 10 channel.close() def test_db_values(self): Connection(virtual_host=1, transport=Transport).channel() Connection(virtual_host='1', transport=Transport).channel() Connection(virtual_host='/1', transport=Transport).channel() with pytest.raises(Exception): Connection('redis:///foo').channel() def test_db_port(self): c1 = Connection(port=None, transport=Transport).channel() c1.close() c2 = Connection(port=9999, transport=Transport).channel() c2.close() def test_close_poller_not_active(self): c = Connection(transport=Transport).channel() cycle = c.connection.cycle c.client.connection c.close() assert c not in cycle._channels def test_close_ResponseError(self): c = Connection(transport=Transport).channel() c.client.bgsave_raises_ResponseError = True c.close() def test_close_disconnects(self): c = Connection(transport=Transport).channel() conn1 = c.client.connection conn2 = c.subclient.connection c.close() assert conn1.disconnected assert conn2.disconnected def test_get__Empty(self): channel = self.connection.channel() with pytest.raises(Empty): channel._get('does-not-exist') channel.close() def test_get_client(self): with mock.module_exists(*_redis_modules()): conn = Connection(transport=Transport) chan = conn.channel() assert chan.Client assert chan.ResponseError assert conn.transport.connection_errors assert conn.transport.channel_errors def test_check_at_least_we_try_to_connect_and_fail(self): import redis connection = Connection('redis://localhost:65534/') with pytest.raises(redis.exceptions.ConnectionError): chan = connection.channel() chan._size('some_queue') def _redis_modules(): class ConnectionError(Exception): pass class AuthenticationError(Exception): pass class InvalidData(Exception): pass class InvalidResponse(Exception): pass class ResponseError(Exception): pass exceptions = types.ModuleType(bytes_if_py2('redis.exceptions')) exceptions.ConnectionError = ConnectionError exceptions.AuthenticationError = AuthenticationError exceptions.InvalidData = InvalidData exceptions.InvalidResponse = InvalidResponse exceptions.ResponseError = ResponseError class Redis(object): pass myredis = types.ModuleType(bytes_if_py2('redis')) myredis.exceptions = exceptions myredis.Redis = Redis return myredis, exceptions @skip.unless_module('redis') class test_MultiChannelPoller: def setup(self): self.Poller = redis.MultiChannelPoller def test_on_poll_start(self): p = self.Poller() p._channels = [] p.on_poll_start() p._register_BRPOP = Mock(name='_register_BRPOP') p._register_LISTEN = Mock(name='_register_LISTEN') chan1 = Mock(name='chan1') p._channels = [chan1] chan1.active_queues = [] chan1.active_fanout_queues = [] p.on_poll_start() chan1.active_queues = ['q1'] chan1.active_fanout_queues = ['q2'] chan1.qos.can_consume.return_value = False p.on_poll_start() p._register_LISTEN.assert_called_with(chan1) p._register_BRPOP.assert_not_called() chan1.qos.can_consume.return_value = True p._register_LISTEN.reset_mock() p.on_poll_start() p._register_BRPOP.assert_called_with(chan1) p._register_LISTEN.assert_called_with(chan1) def test_on_poll_init(self): p = self.Poller() chan1 = Mock(name='chan1') p._channels = [] poller = Mock(name='poller') p.on_poll_init(poller) assert p.poller is poller p._channels = [chan1] 
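# With a channel registered, on_poll_init() must both remember the poller
# and kick off recovery of unacked messages whose visibility timeout has
# expired, bounded by the channel's unacked_restore_limit (asserted next).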
p.on_poll_init(poller) chan1.qos.restore_visible.assert_called_with( num=chan1.unacked_restore_limit, ) def test_handle_event(self): p = self.Poller() chan = Mock(name='chan') p._fd_to_chan[13] = chan, 'BRPOP' chan.handlers = {'BRPOP': Mock(name='BRPOP')} chan.qos.can_consume.return_value = False p.handle_event(13, redis.READ) chan.handlers['BRPOP'].assert_not_called() chan.qos.can_consume.return_value = True p.handle_event(13, redis.READ) chan.handlers['BRPOP'].assert_called_with() p.handle_event(13, redis.ERR) chan._poll_error.assert_called_with('BRPOP') p.handle_event(13, ~(redis.READ | redis.ERR)) def test_fds(self): p = self.Poller() p._fd_to_chan = {1: 2} assert p.fds == p._fd_to_chan def test_close_unregisters_fds(self): p = self.Poller() poller = p.poller = Mock() p._chan_to_sock.update({1: 1, 2: 2, 3: 3}) p.close() assert poller.unregister.call_count == 3 u_args = poller.unregister.call_args_list assert sorted(u_args) == [ ((1,), {}), ((2,), {}), ((3,), {}), ] def test_close_when_unregister_raises_KeyError(self): p = self.Poller() p.poller = Mock() p._chan_to_sock.update({1: 1}) p.poller.unregister.side_effect = KeyError(1) p.close() def test_close_resets_state(self): p = self.Poller() p.poller = Mock() p._channels = Mock() p._fd_to_chan = Mock() p._chan_to_sock = Mock() p._chan_to_sock.itervalues.return_value = [] p._chan_to_sock.values.return_value = [] # py3k p.close() p._channels.clear.assert_called_with() p._fd_to_chan.clear.assert_called_with() p._chan_to_sock.clear.assert_called_with() def test_register_when_registered_reregisters(self): p = self.Poller() p.poller = Mock() channel, client, type = Mock(), Mock(), Mock() sock = client.connection._sock = Mock() sock.fileno.return_value = 10 p._chan_to_sock = {(channel, client, type): 6} p._register(channel, client, type) p.poller.unregister.assert_called_with(6) assert p._fd_to_chan[10] == (channel, type) assert p._chan_to_sock[(channel, client, type)] == sock p.poller.register.assert_called_with(sock, p.eventflags) # when client not connected yet client.connection._sock = None def after_connected(): client.connection._sock = Mock() client.connection.connect.side_effect = after_connected p._register(channel, client, type) client.connection.connect.assert_called_with() def test_register_BRPOP(self): p = self.Poller() channel = Mock() channel.client.connection._sock = None p._register = Mock() channel._in_poll = False p._register_BRPOP(channel) assert channel._brpop_start.call_count == 1 assert p._register.call_count == 1 channel.client.connection._sock = Mock() p._chan_to_sock[(channel, channel.client, 'BRPOP')] = True channel._in_poll = True p._register_BRPOP(channel) assert channel._brpop_start.call_count == 1 assert p._register.call_count == 1 def test_register_LISTEN(self): p = self.Poller() channel = Mock() channel.subclient.connection._sock = None channel._in_listen = False p._register = Mock() p._register_LISTEN(channel) p._register.assert_called_with(channel, channel.subclient, 'LISTEN') assert p._register.call_count == 1 assert channel._subscribe.call_count == 1 channel._in_listen = True p._chan_to_sock[(channel, channel.subclient, 'LISTEN')] = 3 channel.subclient.connection._sock = Mock() p._register_LISTEN(channel) assert p._register.call_count == 1 assert channel._subscribe.call_count == 1 def create_get(self, events=None, queues=None, fanouts=None): _pr = [] if events is None else events _aq = [] if queues is None else queues _af = [] if fanouts is None else fanouts p = self.Poller() p.poller = Mock() 
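# Test helper: build a poller whose poll() returns the canned `events`,
# and attach one mock channel advertising the given plain and fanout
# queues, so each test can drive get() without a real connection.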
p.poller.poll.return_value = _pr p._register_BRPOP = Mock() p._register_LISTEN = Mock() channel = Mock() p._channels = [channel] channel.active_queues = _aq channel.active_fanout_queues = _af return p, channel def test_get_no_actions(self): p, channel = self.create_get() with pytest.raises(redis.Empty): p.get(Mock()) def test_qos_reject(self): p, channel = self.create_get() qos = redis.QoS(channel) qos.ack = Mock(name='Qos.ack') qos.reject(1234) qos.ack.assert_called_with(1234) def test_get_brpop_qos_allow(self): p, channel = self.create_get(queues=['a_queue']) channel.qos.can_consume.return_value = True with pytest.raises(redis.Empty): p.get(Mock()) p._register_BRPOP.assert_called_with(channel) def test_get_brpop_qos_disallow(self): p, channel = self.create_get(queues=['a_queue']) channel.qos.can_consume.return_value = False with pytest.raises(redis.Empty): p.get(Mock()) p._register_BRPOP.assert_not_called() def test_get_listen(self): p, channel = self.create_get(fanouts=['f_queue']) with pytest.raises(redis.Empty): p.get(Mock()) p._register_LISTEN.assert_called_with(channel) def test_get_receives_ERR(self): p, channel = self.create_get(events=[(1, eventio.ERR)]) p._fd_to_chan[1] = (channel, 'BRPOP') with pytest.raises(redis.Empty): p.get(Mock()) channel._poll_error.assert_called_with('BRPOP') def test_get_receives_multiple(self): p, channel = self.create_get(events=[(1, eventio.ERR), (1, eventio.ERR)]) p._fd_to_chan[1] = (channel, 'BRPOP') with pytest.raises(redis.Empty): p.get(Mock()) channel._poll_error.assert_called_with('BRPOP') @skip.unless_module('redis') class test_Mutex: def test_mutex(self, lock_id='xxx'): client = Mock(name='client') with patch('kombu.transport.redis.uuid') as uuid: # Won uuid.return_value = lock_id client.setnx.return_value = True client.pipeline = ContextMock() pipe = client.pipeline.return_value pipe.get.return_value = lock_id held = False with redis.Mutex(client, 'foo1', 100): held = True assert held client.setnx.assert_called_with('foo1', lock_id) pipe.get.return_value = 'yyy' held = False with redis.Mutex(client, 'foo1', 100): held = True assert held # Did not win client.expire.reset_mock() pipe.get.return_value = lock_id client.setnx.return_value = False with pytest.raises(redis.MutexHeld): held = False with redis.Mutex(client, 'foo1', '100'): held = True assert not held client.ttl.return_value = 0 with pytest.raises(redis.MutexHeld): held = False with redis.Mutex(client, 'foo1', '100'): held = True assert not held client.expire.assert_called() # Wins but raises WatchError (and that is ignored) client.setnx.return_value = True pipe.watch.side_effect = redis.redis.WatchError() held = False with redis.Mutex(client, 'foo1', 100): held = True assert held @skip.unless_module('redis.sentinel') class test_RedisSentinel: def test_method_called(self): from kombu.transport.redis import SentinelChannel with patch.object(SentinelChannel, '_sentinel_managed_pool') as p: connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'master_name': 'not_important', }, ) connection.channel() p.assert_called() def test_getting_master_from_sentinel(self): with patch('redis.sentinel.Sentinel') as patched: connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'master_name': 'not_important', }, ) connection.channel() assert patched master_for = patched.return_value.master_for master_for.assert_called() master_for.assert_called_with('not_important', ANY) master_for().connection_pool.get_connection.assert_called() def 
test_can_create_connection(self): from redis.exceptions import ConnectionError connection = Connection( 'sentinel://localhost:65534/', transport_options={ 'master_name': 'not_important', }, ) with pytest.raises(ConnectionError): connection.channel() kombu-4.1.0/t/unit/transport/test_filesystem.py0000644000175000017500000001140513130603207021626 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import tempfile from case import skip from case.skip import SkipTest from kombu import Connection, Exchange, Queue, Consumer, Producer @skip.if_win32() class test_FilesystemTransport: def setup(self): self.channels = set() try: data_folder_in = tempfile.mkdtemp() data_folder_out = tempfile.mkdtemp() except Exception: raise SkipTest('filesystem transport: cannot create tempfiles') self.c = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_in, 'data_folder_out': data_folder_out, }) self.channels.add(self.c.default_channel) self.p = Connection(transport='filesystem', transport_options={ 'data_folder_in': data_folder_out, 'data_folder_out': data_folder_in, }) self.channels.add(self.p.default_channel) self.e = Exchange('test_transport_filesystem') self.q = Queue('test_transport_filesystem', exchange=self.e, routing_key='test_transport_filesystem') self.q2 = Queue('test_transport_filesystem2', exchange=self.e, routing_key='test_transport_filesystem2') def teardown(self): # make sure we don't attempt to restore messages at shutdown. for channel in self.channels: try: channel._qos._dirty.clear() except AttributeError: pass try: channel._qos._delivered.clear() except AttributeError: pass def _add_channel(self, channel): self.channels.add(channel) return channel def test_produce_consume_noack(self): producer = Producer(self._add_channel(self.p.channel()), self.e) consumer = Consumer(self._add_channel(self.c.channel()), self.q, no_ack=True) for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') _received = [] def callback(message_data, message): _received.append(message) consumer.register_callback(callback) consumer.consume() while 1: if len(_received) == 10: break self.c.drain_events() assert len(_received) == 10 def test_produce_consume(self): producer_channel = self._add_channel(self.p.channel()) consumer_channel = self._add_channel(self.c.channel()) producer = Producer(producer_channel, self.e) consumer1 = Consumer(consumer_channel, self.q) consumer2 = Consumer(consumer_channel, self.q2) self.q2(consumer_channel).declare() for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') _received1 = [] _received2 = [] def callback1(message_data, message): _received1.append(message) message.ack() def callback2(message_data, message): _received2.append(message) message.ack() consumer1.register_callback(callback1) consumer2.register_callback(callback2) consumer1.consume() consumer2.consume() while 1: if len(_received1) + len(_received2) == 20: break self.c.drain_events() assert len(_received1) + len(_received2) == 20 # compression producer.publish({'compressed': True}, routing_key='test_transport_filesystem', compression='zlib') m = self.q(consumer_channel).get() assert m.payload == {'compressed': True} # queue.delete for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem') assert self.q(consumer_channel).get() self.q(consumer_channel).delete() 
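# queue.delete() must remove the messages persisted in the data folder
# along with the queue itself, so after the redeclare below the queue is
# expected to come back empty.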
self.q(consumer_channel).declare() assert self.q(consumer_channel).get() is None # queue.purge for i in range(10): producer.publish({'foo': i}, routing_key='test_transport_filesystem2') assert self.q2(consumer_channel).get() self.q2(consumer_channel).purge() assert self.q2(consumer_channel).get() is None kombu-4.1.0/t/unit/transport/test_qpid.py0000644000175000017500000021330713130603207020404 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import select import ssl import socket import sys import time import uuid from collections import OrderedDict try: from collections.abc import Callable except ImportError: from collections import Callable from itertools import count from case import Mock, call, patch, skip from kombu.five import Empty, keys, range, monotonic from kombu.transport.qpid import (AuthenticationFailure, Channel, Connection, ConnectionError, Message, NotFound, QoS, Transport) from kombu.transport.virtual import Base64 QPID_MODULE = 'kombu.transport.qpid' @pytest.fixture def disable_runtime_dependency_check(patching): mock_dependency_is_none = patching(QPID_MODULE + '.dependency_is_none') mock_dependency_is_none.return_value = False return mock_dependency_is_none class ExtraAssertionsMixin(object): """A mixin class adding assertDictEqual and assertDictContainsSubset.""" def assertDictEqual(self, a, b, msg=None): """ Test that two dictionaries are equal. Implemented here because this method is not available in all supported versions of unittest. This asserts that the set of keys is the same in a and b. Also asserts that the value of each key is the same in a and b using the == operator. """ assert set(keys(a)) == set(keys(b)) for key in keys(a): assert a[key] == b[key] def assertDictContainsSubset(self, a, b, msg=None): """ Assert that all the key/value pairs in a exist in b. """ for key in keys(a): assert key in b assert a[key] == b[key] class QpidException(Exception): """ An object used to mock Exceptions provided by qpid.messaging.exceptions """ def __init__(self, code=None, text=None): super(QpidException, self).__init__(code, text) self.code = code self.text = text class BreakOutException(Exception): pass @skip.if_python3() @skip.if_pypy() class test_QoS__init__(object): def setup(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def test__init__prefetch_default_set_correct_without_prefetch_value(self): assert self.qos.prefetch_count == 1 def test__init__prefetch_is_hard_set_to_one(self): qos_limit_two = QoS(self.mock_session, prefetch_count=2) assert qos_limit_two.prefetch_count == 1 def test__init___not_yet_acked_is_initialized(self): assert isinstance(self.qos._not_yet_acked, OrderedDict) @skip.if_python3() @skip.if_pypy() class test_QoS_can_consume(object): def setup(self): session = Mock() self.qos = QoS(session) def test_True_when_prefetch_limit_is_zero(self): self.qos.prefetch_count = 0 self.qos._not_yet_acked = [] assert self.qos.can_consume() def test_True_when_len_of__not_yet_acked_is_lt_prefetch_count(self): self.qos.prefetch_count = 3 self.qos._not_yet_acked = ['a', 'b'] assert self.qos.can_consume() def test_False_when_len_of__not_yet_acked_is_eq_prefetch_count(self): self.qos.prefetch_count = 3 self.qos._not_yet_acked = ['a', 'b', 'c'] assert not self.qos.can_consume() @skip.if_python3() @skip.if_pypy() class test_QoS_can_consume_max_estimate(object): def setup(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def test_return_one_when_prefetch_count_eq_zero(self): self.qos.prefetch_count = 0 assert self.qos.can_consume_max_estimate() == 1 def
test_return_prefetch_count_sub_len__not_yet_acked(self): self.qos._not_yet_acked = ['a', 'b'] self.qos.prefetch_count = 4 assert self.qos.can_consume_max_estimate() == 2 @skip.if_python3() @skip.if_pypy() class test_QoS_ack(object): def setup(self): self.mock_session = Mock() self.qos = QoS(self.mock_session) def test_ack_pops__not_yet_acked(self): message = Mock() self.qos.append(message, 1) assert 1 in self.qos._not_yet_acked self.qos.ack(1) assert 1 not in self.qos._not_yet_acked def test_ack_calls_session_acknowledge_with_message(self): message = Mock() self.qos.append(message, 1) self.qos.ack(1) self.qos.session.acknowledge.assert_called_with(message=message) @skip.if_python3() @skip.if_pypy() class test_QoS_reject(object): @pytest.fixture(autouse=True) def setup_qpid(self, patching): self.mock_qpid = patching(QPID_MODULE + '.qpid') self.mock_Disposition = self.mock_qpid.messaging.Disposition self.mock_RELEASED = self.mock_qpid.messaging.RELEASED self.mock_REJECTED = self.mock_qpid.messaging.REJECTED def setup(self): self.mock_session = Mock() self.mock_message = Mock() self.qos = QoS(self.mock_session) def test_reject_pops__not_yet_acked(self): self.qos.append(self.mock_message, 1) assert 1 in self.qos._not_yet_acked self.qos.reject(1) assert 1 not in self.qos._not_yet_acked def test_reject_requeue_true(self): self.qos.append(self.mock_message, 1) self.qos.reject(1, requeue=True) self.mock_Disposition.assert_called_with(self.mock_RELEASED) self.qos.session.acknowledge.assert_called_with( message=self.mock_message, disposition=self.mock_Disposition.return_value, ) def test_reject_requeue_false(self): message = Mock() self.qos.append(message, 1) self.qos.reject(1, requeue=False) self.mock_Disposition.assert_called_with(self.mock_REJECTED) self.qos.session.acknowledge.assert_called_with( message=message, disposition=self.mock_Disposition.return_value, ) @skip.if_python3() @skip.if_pypy() class test_QoS(object): def mock_message_factory(self): """Create and return a mock message and delivery_tag.""" m_delivery_tag = next(self.delivery_tag_generator) m = 'message %s' % (m_delivery_tag, ) return m, m_delivery_tag def add_n_messages_to_qos(self, n, qos): """Add N mock messages into the passed-in qos object.""" for i in range(n): self.add_message_to_qos(qos) def add_message_to_qos(self, qos): """Add a single mock message into the passed-in qos object. Uses the mock_message_factory() to create the message and delivery_tag.
""" m, m_delivery_tag = self.mock_message_factory() qos.append(m, m_delivery_tag) def setup(self): self.mock_session = Mock() self.qos_no_limit = QoS(self.mock_session) self.qos_limit_2 = QoS(self.mock_session, prefetch_count=2) self.delivery_tag_generator = count(1) def test_append(self): """Append two messages and check inside the QoS object that they were put into the internal data structures correctly """ qos = self.qos_no_limit m1, m1_tag = self.mock_message_factory() m2, m2_tag = self.mock_message_factory() qos.append(m1, m1_tag) length_not_yet_acked = len(qos._not_yet_acked) assert length_not_yet_acked == 1 checked_message1 = qos._not_yet_acked[m1_tag] assert m1 is checked_message1 qos.append(m2, m2_tag) length_not_yet_acked = len(qos._not_yet_acked) assert length_not_yet_acked == 2 checked_message2 = qos._not_yet_acked[m2_tag] assert m2 is checked_message2 def test_get(self): """Append two messages, and use get to receive them""" qos = self.qos_no_limit m1, m1_tag = self.mock_message_factory() m2, m2_tag = self.mock_message_factory() qos.append(m1, m1_tag) qos.append(m2, m2_tag) message1 = qos.get(m1_tag) message2 = qos.get(m2_tag) assert m1 is message1 assert m2 is message2 @skip.if_python3() @skip.if_pypy() class ConnectionTestBase(object): @patch(QPID_MODULE + '.qpid') def setup(self, mock_qpid): self.connection_options = { 'host': 'localhost', 'port': 5672, 'transport': 'tcp', 'timeout': 10, 'sasl_mechanisms': 'ANONYMOUS', } self.mock_qpid_connection = mock_qpid.messaging.Connection self.conn = Connection(**self.connection_options) @skip.if_python3() @skip.if_pypy() class test_Connection__init__(ExtraAssertionsMixin, ConnectionTestBase): def test_stores_connection_options(self): # ensure that only one mech was passed into connection. The other # options should all be passed through as-is modified_conn_opts = self.connection_options self.assertDictEqual( modified_conn_opts, self.conn.connection_options, ) def test_class_variables(self): assert isinstance(self.conn.channels, list) assert isinstance(self.conn._callbacks, dict) def test_establishes_connection(self): modified_conn_opts = self.connection_options self.mock_qpid_connection.establish.assert_called_with( **modified_conn_opts ) def test_saves_established_connection(self): created_conn = self.mock_qpid_connection.establish.return_value assert self.conn._qpid_conn is created_conn @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_mutates_ConnError_by_message(self, mock_qpid, mock_exc_info): text = 'connection-forced: Authentication failed(320)' my_conn_error = QpidException(text=text) mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_mutates_ConnError_by_code(self, mock_qpid, mock_exc_info): my_conn_error = QpidException(code=320, text='someothertext') mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: 
exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_connection__init__mutates_ConnError_by_message2(self, mock_qpid, mock_exc_info): """ Test for PLAIN connection via python-saslwrapper, sans cyrus-sasl-plain This test is specific for what is returned when we attempt to connect with PLAIN mech and python-saslwrapper is installed, but cyrus-sasl-plain is not installed. """ my_conn_error = QpidException() my_conn_error.text = 'Error in sasl_client_start (-4) SASL(-4): no '\ 'mechanism available' mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = ('a', 'b', None) try: self.conn = Connection(**self.connection_options) except AuthenticationFailure as error: exc_info = sys.exc_info() assert not isinstance(error, QpidException) assert exc_info[1] == 'b' assert exc_info[2] is None else: self.fail('ConnectionError type was not mutated correctly') @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) @patch(QPID_MODULE + '.sys.exc_info') @patch(QPID_MODULE + '.qpid') def test_unknown_connection_error(self, mock_qpid, mock_exc_info): # If we get a connection error that we don't understand, # bubble it up as-is my_conn_error = QpidException(code=999, text='someothertext') mock_qpid.messaging.Connection.establish.side_effect = my_conn_error mock_exc_info.return_value = 'a', 'b', None try: self.conn = Connection(**self.connection_options) except Exception as error: assert error.code == 999 else: self.fail('Connection should have thrown an exception') @patch.object(Transport, 'channel_errors', new=(QpidException, )) @patch(QPID_MODULE + '.qpid') @patch(QPID_MODULE + '.ConnectionError', new=IOError) def test_non_qpid_error_raises(self, mock_qpid): mock_Qpid_Connection = mock_qpid.messaging.Connection my_conn_error = SyntaxError() my_conn_error.text = 'some non auth related error message' mock_Qpid_Connection.establish.side_effect = my_conn_error with pytest.raises(SyntaxError): Connection(**self.connection_options) @patch(QPID_MODULE + '.qpid') @patch(QPID_MODULE + '.ConnectionError', new=IOError) def test_non_auth_conn_error_raises(self, mock_qpid): mock_Qpid_Connection = mock_qpid.messaging.Connection my_conn_error = IOError() my_conn_error.text = 'some non auth related error message' mock_Qpid_Connection.establish.side_effect = my_conn_error with pytest.raises(IOError): Connection(**self.connection_options) @skip.if_python3() @skip.if_pypy() class test_Connection_class_attributes(ConnectionTestBase): def test_connection_verify_class_attributes(self): assert Channel == Connection.Channel @skip.if_python3() @skip.if_pypy() class test_Connection_get_Qpid_connection(ConnectionTestBase): def test_connection_get_qpid_connection(self): self.conn._qpid_conn = Mock() returned_connection = self.conn.get_qpid_connection() assert self.conn._qpid_conn is returned_connection @skip.if_python3() @skip.if_pypy() class test_Connection_close(ConnectionTestBase): def test_connection_close(self): self.conn._qpid_conn = Mock() self.conn.close() self.conn._qpid_conn.close.assert_called_once_with() @skip.if_python3() @skip.if_pypy() class test_Connection_close_channel(ConnectionTestBase): def setup(self): super(test_Connection_close_channel, self).setup() self.conn.channels = Mock() def 
test_connection_close_channel_removes_channel_from_channel_list(self): mock_channel = Mock() self.conn.close_channel(mock_channel) self.conn.channels.remove.assert_called_once_with(mock_channel) def test_connection_close_channel_handles_ValueError_being_raised(self): self.conn.channels.remove = Mock(side_effect=ValueError()) self.conn.close_channel(Mock()) def test_connection_close_channel_set_channel_connection_to_None(self): mock_channel = Mock() mock_channel.connection = False self.conn.channels.remove = Mock(side_effect=ValueError()) self.conn.close_channel(mock_channel) assert mock_channel.connection is None @skip.if_python3() @skip.if_pypy() class ChannelTestBase(object): @pytest.fixture(autouse=True) def setup_channel(self, patching): self.mock_qpidtoollibs = patching(QPID_MODULE + '.qpidtoollibs') self.mock_broker_agent = self.mock_qpidtoollibs.BrokerAgent self.conn = Mock() self.transport = Mock() self.channel = Channel(self.conn, self.transport) @skip.if_python3() @skip.if_pypy() class test_Channel_purge(ChannelTestBase): def setup(self): self.mock_queue = Mock() def test_gets_queue(self): self.channel._purge(self.mock_queue) getQueue = self.mock_broker_agent.return_value.getQueue getQueue.assert_called_once_with(self.mock_queue) def test_does_not_call_purge_if_message_count_is_zero(self): values = {'msgDepth': 0} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values self.channel._purge(self.mock_queue) assert not queue_obj.purge.called def test_purges_all_messages_from_queue(self): values = {'msgDepth': 5} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values self.channel._purge(self.mock_queue) queue_obj.purge.assert_called_with(5) def test_returns_message_count(self): values = {'msgDepth': 5} queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.values = values result = self.channel._purge(self.mock_queue) assert result == 5 @patch(QPID_MODULE + '.NotFound', new=QpidException) def test_raises_channel_error_if_queue_does_not_exist(self): self.mock_broker_agent.return_value.getQueue.return_value = None with pytest.raises(QpidException): self.channel._purge(self.mock_queue) @skip.if_python3() @skip.if_pypy() class test_Channel_put(ChannelTestBase): @patch(QPID_MODULE + '.qpid') def test_channel__put_onto_queue(self, mock_qpid): routing_key = 'routingkey' mock_message = Mock() mock_Message_cls = mock_qpid.messaging.Message self.channel._put(routing_key, mock_message) address_str = '{0}; {{assert: always, node: {{type: queue}}}}'.format( routing_key, ) self.transport.session.sender.assert_called_with(address_str) mock_Message_cls.assert_called_with( content=mock_message, subject=None, ) mock_sender = self.transport.session.sender.return_value mock_sender.send.assert_called_with( mock_Message_cls.return_value, sync=True, ) mock_sender.close.assert_called_with() @patch(QPID_MODULE + '.qpid') def test_channel__put_onto_exchange(self, mock_qpid): mock_routing_key = 'routingkey' mock_exchange_name = 'myexchange' mock_message = Mock() mock_Message_cls = mock_qpid.messaging.Message self.channel._put(mock_routing_key, mock_message, mock_exchange_name) addrstr = '{0}/{1}; {{assert: always, node: {{type: topic}}}}'.format( mock_exchange_name, mock_routing_key, ) self.transport.session.sender.assert_called_with(addrstr) mock_Message_cls.assert_called_with( content=mock_message, subject=mock_routing_key, ) mock_sender = self.transport.session.sender.return_value 
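# Publishing through an exchange resolves to an 'exchange/routing_key'
# qpid address with node type 'topic'; the message is sent synchronously
# and the sender is closed again, as the assertions below verify.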
mock_sender.send.assert_called_with( mock_Message_cls.return_value, sync=True, ) mock_sender.close.assert_called_with() @skip.if_python3() @skip.if_pypy() class test_Channel_get(ChannelTestBase): def test_channel__get(self): mock_queue = Mock() result = self.channel._get(mock_queue) self.transport.session.receiver.assert_called_once_with(mock_queue) mock_rx = self.transport.session.receiver.return_value mock_rx.fetch.assert_called_once_with(timeout=0) mock_rx.close.assert_called_once_with() assert mock_rx.fetch.return_value is result @skip.if_python3() @skip.if_pypy() class test_Channel_close(ChannelTestBase): @pytest.fixture(autouse=True) def setup_basic_cancel(self, patching, setup_channel): self.mock_basic_cancel = patching.object(self.channel, 'basic_cancel') self.channel.closed = False @pytest.fixture(autouse=True) def setup_receivers(self, setup_channel): self.mock_receiver1 = Mock() self.mock_receiver2 = Mock() self.channel._receivers = { 1: self.mock_receiver1, 2: self.mock_receiver2, } def test_channel_close_sets_close_attribute(self): self.channel.close() assert self.channel.closed def test_channel_close_calls_basic_cancel_on_all_receivers(self): self.channel.close() self.mock_basic_cancel.assert_has_calls([call(1), call(2)]) def test_channel_close_calls_close_channel_on_connection(self): self.channel.close() self.conn.close_channel.assert_called_once_with(self.channel) def test_channel_close_calls_close_on_broker_agent(self): self.channel.close() self.channel._broker.close.assert_called_once_with() def test_channel_close_does_nothing_if_already_closed(self): self.channel.closed = True self.channel.close() self.mock_basic_cancel.assert_not_called() def test_channel_close_does_not_call_close_channel_if_conn_is_None(self): self.channel.connection = None self.channel.close() self.conn.close_channel.assert_not_called() @skip.if_python3() @skip.if_pypy() class test_Channel_basic_qos(ChannelTestBase): def test_channel_basic_qos_always_returns_one(self): self.channel.basic_qos(2) assert self.channel.qos.prefetch_count == 1 @skip.if_python3() @skip.if_pypy() class test_Channel_basic_get(ChannelTestBase): @pytest.fixture(autouse=True) def setup_channel_attributes(self, setup_channel): self.channel.Message = Mock() self.channel._get = Mock() def test_channel_basic_get_calls__get_with_queue(self): mock_queue = Mock() self.channel.basic_get(mock_queue) self.channel._get.assert_called_once_with(mock_queue) def test_channel_basic_get_creates_Message_correctly(self): mock_queue = Mock() self.channel.basic_get(mock_queue) mock_raw_message = self.channel._get.return_value.content self.channel.Message.assert_called_once_with( mock_raw_message, channel=self.channel, ) def test_channel_basic_get_acknowledges_message_by_default(self): mock_queue = Mock() self.channel.basic_get(mock_queue) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_acknowledges_message_with_no_ack_False(self): mock_queue = Mock() self.channel.basic_get(mock_queue, no_ack=False) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_acknowledges_message_with_no_ack_True(self): mock_queue = Mock() self.channel.basic_get(mock_queue, no_ack=True) mock_qpid_message = self.channel._get.return_value acknowledge = self.transport.session.acknowledge 
acknowledge.assert_called_once_with(message=mock_qpid_message) def test_channel_basic_get_returns_correct_message(self): mock_queue = Mock() basic_get_result = self.channel.basic_get(mock_queue) expected_message = self.channel.Message.return_value assert expected_message is basic_get_result def test_basic_get_returns_None_when_channel__get_raises_Empty(self): mock_queue = Mock() self.channel._get = Mock(side_effect=Empty) basic_get_result = self.channel.basic_get(mock_queue) assert self.channel.Message.call_count == 0 assert basic_get_result is None @skip.if_python3() @skip.if_pypy() class test_Channel_basic_cancel(ChannelTestBase): @pytest.fixture(autouse=True) def setup_receivers(self, setup_channel): self.channel._receivers = {1: Mock()} def test_channel_basic_cancel_no_error_if_consumer_tag_not_found(self): self.channel.basic_cancel(2) def test_channel_basic_cancel_pops_receiver(self): self.channel.basic_cancel(1) assert 1 not in self.channel._receivers def test_channel_basic_cancel_closes_receiver(self): mock_receiver = self.channel._receivers[1] self.channel.basic_cancel(1) mock_receiver.close.assert_called_once_with() def test_channel_basic_cancel_pops__tag_to_queue(self): self.channel._tag_to_queue = Mock() self.channel.basic_cancel(1) self.channel._tag_to_queue.pop.assert_called_once_with(1, None) def test_channel_basic_cancel_pops_connection__callbacks(self): self.channel._tag_to_queue = Mock() self.channel.basic_cancel(1) mock_queue = self.channel._tag_to_queue.pop.return_value self.conn._callbacks.pop.assert_called_once_with(mock_queue, None) @skip.if_python3() @skip.if_pypy() class test_Channel__init__(ChannelTestBase, ExtraAssertionsMixin): def test_channel___init__sets_variables_as_expected(self): assert self.conn is self.channel.connection assert self.transport is self.channel.transport assert not self.channel.closed self.conn.get_qpid_connection.assert_called_once_with() expected_broker_agent = self.mock_broker_agent.return_value assert self.channel._broker is expected_broker_agent self.assertDictEqual(self.channel._tag_to_queue, {}) self.assertDictEqual(self.channel._receivers, {}) assert self.channel._qos is None @skip.if_python3() @skip.if_pypy() class test_Channel_basic_consume(ChannelTestBase, ExtraAssertionsMixin): @pytest.fixture(autouse=True) def setup_callbacks(self, setup_channel): self.conn._callbacks = {} def test_channel_basic_consume_adds_queue_to__tag_to_queue(self): mock_tag = Mock() mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), mock_tag) expected_dict = {mock_tag: mock_queue} self.assertDictEqual(expected_dict, self.channel._tag_to_queue) def test_channel_basic_consume_adds_entry_to_connection__callbacks(self): mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) assert mock_queue in self.conn._callbacks assert isinstance(self.conn._callbacks[mock_queue], Callable) def test_channel_basic_consume_creates_new_receiver(self): mock_queue = Mock() self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) self.transport.session.receiver.assert_called_once_with(mock_queue) def test_channel_basic_consume_saves_new_receiver(self): mock_tag = Mock() self.channel.basic_consume(Mock(), Mock(), Mock(), mock_tag) new_mock_receiver = self.transport.session.receiver.return_value expected_dict = {mock_tag: new_mock_receiver} self.assertDictEqual(expected_dict, self.channel._receivers) def test_channel_basic_consume_sets_capacity_on_new_receiver(self): mock_prefetch_count = Mock() 
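# The receiver's capacity mirrors the channel's QoS prefetch count,
# which caps how many messages qpid will hand over before they are
# acknowledged.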
self.channel.qos.prefetch_count = mock_prefetch_count self.channel.basic_consume(Mock(), Mock(), Mock(), Mock()) new_receiver = self.transport.session.receiver.return_value assert new_receiver.capacity is mock_prefetch_count def get_callback(self, no_ack=Mock(), original_cb=Mock()): self.channel.Message = Mock() mock_queue = Mock() self.channel.basic_consume(mock_queue, no_ack, original_cb, Mock()) return self.conn._callbacks[mock_queue] def test_channel_basic_consume_callback_creates_Message_correctly(self): callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_content = mock_qpid_message.content self.channel.Message.assert_called_once_with( mock_content, channel=self.channel, ) def test_channel_basic_consume_callback_adds_message_to_QoS(self): self.channel._qos = Mock() callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_delivery_tag = self.channel.Message.return_value.delivery_tag self.channel._qos.append.assert_called_once_with( mock_qpid_message, mock_delivery_tag, ) def test_channel_basic_consume_callback_gratuitously_acks(self): self.channel.basic_ack = Mock() callback = self.get_callback() mock_qpid_message = Mock() callback(mock_qpid_message) mock_delivery_tag = self.channel.Message.return_value.delivery_tag self.channel.basic_ack.assert_called_once_with(mock_delivery_tag) def test_channel_basic_consume_callback_does_not_ack_when_needed(self): self.channel.basic_ack = Mock() callback = self.get_callback(no_ack=False) mock_qpid_message = Mock() callback(mock_qpid_message) self.channel.basic_ack.assert_not_called() def test_channel_basic_consume_callback_calls_real_callback(self): self.channel.basic_ack = Mock() mock_original_callback = Mock() callback = self.get_callback(original_cb=mock_original_callback) mock_qpid_message = Mock() callback(mock_qpid_message) expected_message = self.channel.Message.return_value mock_original_callback.assert_called_once_with(expected_message) @skip.if_python3() @skip.if_pypy() class test_Channel_queue_delete(ChannelTestBase): @pytest.fixture(autouse=True) def setup_channel_patches(self, patching, setup_channel): self.mock__has_queue = patching.object(self.channel, '_has_queue') self.mock__size = patching.object(self.channel, '_size') self.mock__delete = patching.object(self.channel, '_delete') self.mock_queue = Mock() def test_checks_if_queue_exists(self): self.channel.queue_delete(self.mock_queue) self.mock__has_queue.assert_called_once_with(self.mock_queue) def test_does_nothing_if_queue_does_not_exist(self): self.mock__has_queue.return_value = False self.channel.queue_delete(self.mock_queue) self.mock__delete.assert_not_called() def test_not_empty_and_if_empty_True_no_delete(self): self.mock__size.return_value = 1 self.channel.queue_delete(self.mock_queue, if_empty=True) mock_broker = self.mock_broker_agent.return_value mock_broker.getQueue.assert_not_called() def test_calls_get_queue(self): self.channel.queue_delete(self.mock_queue) getQueue = self.mock_broker_agent.return_value.getQueue getQueue.assert_called_once_with(self.mock_queue) def test_gets_queue_attribute(self): self.channel.queue_delete(self.mock_queue) queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.getAttributes.assert_called_once_with() def test_queue_in_use_and_if_unused_no_delete(self): queue_obj = self.mock_broker_agent.return_value.getQueue.return_value queue_obj.getAttributes.return_value = {'consumerCount': 1} self.channel.queue_delete(self.mock_queue, if_unused=True) 
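# consumerCount is non-zero here, so with if_unused=True the delete
# must be skipped.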
self.mock__delete.assert_not_called() def test_calls__delete_with_queue(self): self.channel.queue_delete(self.mock_queue) self.mock__delete.assert_called_once_with(self.mock_queue) @skip.if_python3() @skip.if_pypy() class test_Channel(ExtraAssertionsMixin): @patch(QPID_MODULE + '.qpidtoollibs') def setup(self, mock_qpidtoollibs): self.mock_connection = Mock() self.mock_qpid_connection = Mock() self.mock_qpid_session = Mock() self.mock_qpid_connection.session = Mock( return_value=self.mock_qpid_session, ) self.mock_connection.get_qpid_connection = Mock( return_value=self.mock_qpid_connection, ) self.mock_transport = Mock() self.mock_broker = Mock() self.mock_Message = Mock() self.mock_BrokerAgent = mock_qpidtoollibs.BrokerAgent self.mock_BrokerAgent.return_value = self.mock_broker self.my_channel = Channel( self.mock_connection, self.mock_transport, ) self.my_channel.Message = self.mock_Message def test_verify_QoS_class_attribute(self): """Verify that the class attribute QoS refers to the QoS object""" assert QoS is Channel.QoS def test_verify_Message_class_attribute(self): """Verify that the class attribute Message refers to the Message object.""" assert Message is Channel.Message def test_body_encoding_class_attribute(self): """Verify that the class attribute body_encoding is set to base64""" assert Channel.body_encoding == 'base64' def test_codecs_class_attribute(self): """Verify that the codecs class attribute has a correct key and value.""" assert isinstance(Channel.codecs, dict) assert 'base64' in Channel.codecs assert isinstance(Channel.codecs['base64'], Base64) def test_size(self): """Test getting the number of messages in a queue specified by name and returning them.""" message_count = 5 mock_queue = Mock() mock_queue_to_check = Mock() mock_queue_to_check.values = {'msgDepth': message_count} self.mock_broker.getQueue.return_value = mock_queue_to_check result = self.my_channel._size(mock_queue) self.mock_broker.getQueue.assert_called_with(mock_queue) assert message_count == result def test_delete(self): """Test deleting a queue calls purge and delQueue with queue name.""" mock_queue = Mock() self.my_channel._purge = Mock() result = self.my_channel._delete(mock_queue) self.my_channel._purge.assert_called_with(mock_queue) self.mock_broker.delQueue.assert_called_with(mock_queue) assert result is None def test_has_queue_true(self): """Test checking if a queue exists, and it does.""" mock_queue = Mock() self.mock_broker.getQueue.return_value = True result = self.my_channel._has_queue(mock_queue) assert result def test_has_queue_false(self): """Test checking if a queue exists, and it does not.""" mock_queue = Mock() self.mock_broker.getQueue.return_value = False result = self.my_channel._has_queue(mock_queue) assert not result @patch('amqp.protocol.queue_declare_ok_t') def test_queue_declare_with_exception_raised(self, mock_queue_declare_ok_t): """Test declare_queue, where an exception is raised and silenced.""" mock_queue = Mock() mock_passive = Mock() mock_durable = Mock() mock_exclusive = Mock() mock_auto_delete = Mock() mock_nowait = Mock() mock_arguments = Mock() mock_msg_count = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False options = { 'passive': mock_passive, 'durable': mock_durable, 'exclusive': mock_exclusive, 'auto-delete': mock_auto_delete, 'arguments': mock_arguments, } mock_consumer_count = Mock() mock_return_value = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() 
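        # getQueue() returns this mock; its .values mapping supplies the msgDepth and consumerCount used in the declare-ok reply.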
mock_queue_data.values = values_dict exception_to_raise = Exception('The foo object already exists.') self.mock_broker.addQueue.side_effect = exception_to_raise self.mock_broker.getQueue.return_value = mock_queue_data mock_queue_declare_ok_t.return_value = mock_return_value result = self.my_channel.queue_declare( mock_queue, passive=mock_passive, durable=mock_durable, exclusive=mock_exclusive, auto_delete=mock_auto_delete, nowait=mock_nowait, arguments=mock_arguments, ) self.mock_broker.addQueue.assert_called_with( mock_queue, options=options, ) mock_queue_declare_ok_t.assert_called_with( mock_queue, mock_msg_count, mock_consumer_count, ) assert mock_return_value is result def test_queue_declare_set_ring_policy_for_celeryev(self): """Test declare_queue sets ring_policy for celeryev.""" mock_queue = Mock() mock_queue.startswith.return_value = True mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, 'qpid.policy_type': 'ring', } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.startswith.assert_called_with('celeryev') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_set_ring_policy_for_pidbox(self): """Test declare_queue sets ring_policy for pidbox.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = True expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, 'qpid.policy_type': 'ring', } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.endswith.assert_called_with('pidbox') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_ring_policy_not_set_as_expected(self): """Test declare_queue does not set ring_policy as expected.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 'exclusive': False, 'auto-delete': True, 'arguments': None, } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) mock_queue.startswith.assert_called_with('celeryev') mock_queue.endswith.assert_called_with('pidbox') self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_test_defaults(self): """Test declare_queue defaults.""" mock_queue = Mock() mock_queue.startswith.return_value = False mock_queue.endswith.return_value = False expected_default_options = { 'passive': False, 'durable': False, 
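            # Note the absence of 'qpid.policy_type': ordinary queue names get only the base defaults.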
'exclusive': False, 'auto-delete': True, 'arguments': None, } mock_msg_count = Mock() mock_consumer_count = Mock() values_dict = { 'msgDepth': mock_msg_count, 'consumerCount': mock_consumer_count, } mock_queue_data = Mock() mock_queue_data.values = values_dict self.mock_broker.addQueue.return_value = None self.mock_broker.getQueue.return_value = mock_queue_data self.my_channel.queue_declare(mock_queue) self.mock_broker.addQueue.assert_called_with( mock_queue, options=expected_default_options, ) def test_queue_declare_raises_exception_not_silenced(self): unique_exception = Exception('This exception should not be silenced') mock_queue = Mock() self.mock_broker.addQueue.side_effect = unique_exception with pytest.raises(unique_exception.__class__): self.my_channel.queue_declare(mock_queue) self.mock_broker.addQueue.assert_called_once_with( mock_queue, options={ 'exclusive': False, 'durable': False, 'qpid.policy_type': 'ring', 'passive': False, 'arguments': None, 'auto-delete': True }) def test_exchange_declare_raises_exception_and_silenced(self): """Create exchange where an exception is raised and then silenced""" self.mock_broker.addExchange.side_effect = Exception( 'The foo object already exists.', ) self.my_channel.exchange_declare() def test_exchange_declare_raises_exception_not_silenced(self): """Create Exchange where an exception is raised and not silenced.""" unique_exception = Exception('This exception should not be silenced') self.mock_broker.addExchange.side_effect = unique_exception with pytest.raises(unique_exception.__class__): self.my_channel.exchange_declare() def test_exchange_declare(self): """Create Exchange where an exception is NOT raised.""" mock_exchange = Mock() mock_type = Mock() mock_durable = Mock() options = {'durable': mock_durable} result = self.my_channel.exchange_declare( mock_exchange, mock_type, mock_durable, ) self.mock_broker.addExchange.assert_called_with( mock_type, mock_exchange, options, ) assert result is None def test_exchange_delete(self): """Test the deletion of an exchange by name.""" mock_exchange = Mock() result = self.my_channel.exchange_delete(mock_exchange) self.mock_broker.delExchange.assert_called_with(mock_exchange) assert result is None def test_queue_bind(self): """Test binding a queue to an exchange using a routing key.""" mock_queue = Mock() mock_exchange = Mock() mock_routing_key = Mock() self.my_channel.queue_bind( mock_queue, mock_exchange, mock_routing_key, ) self.mock_broker.bind.assert_called_with( mock_exchange, mock_queue, mock_routing_key, ) def test_queue_unbind(self): """Test unbinding a queue from an exchange using a routing key.""" mock_queue = Mock() mock_exchange = Mock() mock_routing_key = Mock() self.my_channel.queue_unbind( mock_queue, mock_exchange, mock_routing_key, ) self.mock_broker.unbind.assert_called_with( mock_exchange, mock_queue, mock_routing_key, ) def test_queue_purge(self): """Test purging a queue by name.""" mock_queue = Mock() purge_result = Mock() self.my_channel._purge = Mock(return_value=purge_result) result = self.my_channel.queue_purge(mock_queue) self.my_channel._purge.assert_called_with(mock_queue) assert purge_result is result @patch(QPID_MODULE + '.Channel.qos') def test_basic_ack(self, mock_qos): """Test that basic_ack calls the QoS object properly.""" mock_delivery_tag = Mock() self.my_channel.basic_ack(mock_delivery_tag) mock_qos.ack.assert_called_with(mock_delivery_tag) @patch(QPID_MODULE + '.Channel.qos') def test_basic_reject(self, mock_qos): """Test that basic_reject calls the QoS object 
properly.""" mock_delivery_tag = Mock() mock_requeue_value = Mock() self.my_channel.basic_reject(mock_delivery_tag, mock_requeue_value) mock_qos.reject.assert_called_with( mock_delivery_tag, requeue=mock_requeue_value, ) def test_qos_manager_is_none(self): """Test the qos property if the QoS object did not already exist.""" self.my_channel._qos = None result = self.my_channel.qos assert isinstance(result, QoS) assert result == self.my_channel._qos def test_qos_manager_already_exists(self): """Test the qos property if the QoS object already exists.""" mock_existing_qos = Mock() self.my_channel._qos = mock_existing_qos result = self.my_channel.qos assert mock_existing_qos is result def test_prepare_message(self): """Test that prepare_message() returns the correct result.""" mock_body = Mock() mock_priority = Mock() mock_content_encoding = Mock() mock_content_type = Mock() mock_header1 = Mock() mock_header2 = Mock() mock_properties1 = Mock() mock_properties2 = Mock() headers = {'header1': mock_header1, 'header2': mock_header2} properties = {'properties1': mock_properties1, 'properties2': mock_properties2} result = self.my_channel.prepare_message( mock_body, priority=mock_priority, content_type=mock_content_type, content_encoding=mock_content_encoding, headers=headers, properties=properties) assert mock_body is result['body'] assert mock_content_encoding is result['content-encoding'] assert mock_content_type is result['content-type'] self.assertDictEqual(headers, result['headers']) self.assertDictContainsSubset(properties, result['properties']) assert (mock_priority is result['properties']['delivery_info']['priority']) @patch('__builtin__.buffer') @patch(QPID_MODULE + '.Channel.body_encoding') @patch(QPID_MODULE + '.Channel.encode_body') @patch(QPID_MODULE + '.Channel._put') def test_basic_publish(self, mock_put, mock_encode_body, mock_body_encoding, mock_buffer): """Test basic_publish().""" mock_original_body = Mock() mock_encoded_body = 'this is my encoded body' mock_message = {'body': mock_original_body, 'properties': {'delivery_info': {}}} mock_encode_body.return_value = ( mock_encoded_body, mock_body_encoding, ) mock_exchange = Mock() mock_routing_key = Mock() mock_encoded_buffered_body = Mock() mock_buffer.return_value = mock_encoded_buffered_body self.my_channel.basic_publish( mock_message, mock_exchange, mock_routing_key, ) mock_encode_body.assert_called_once_with( mock_original_body, mock_body_encoding, ) mock_buffer.assert_called_once_with(mock_encoded_body) assert mock_message['body'] is mock_encoded_buffered_body assert (mock_message['properties']['body_encoding'] is mock_body_encoding) assert isinstance( mock_message['properties']['delivery_tag'], uuid.UUID) assert (mock_message['properties']['delivery_info']['exchange'] is mock_exchange) assert (mock_message['properties']['delivery_info']['routing_key'] is mock_routing_key) mock_put.assert_called_with( mock_routing_key, mock_message, mock_exchange, ) @patch(QPID_MODULE + '.Channel.codecs') def test_encode_body_expected_encoding(self, mock_codecs): """Test if encode_body() works when encoding is set correctly""" mock_body = Mock() mock_encoder = Mock() mock_encoded_result = Mock() mock_codecs.get.return_value = mock_encoder mock_encoder.encode.return_value = mock_encoded_result result = self.my_channel.encode_body(mock_body, encoding='base64') expected_result = (mock_encoded_result, 'base64') assert expected_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_encode_body_not_expected_encoding(self, mock_codecs): 
"""Test if encode_body() works when encoding is not set correctly.""" mock_body = Mock() result = self.my_channel.encode_body(mock_body, encoding=None) expected_result = mock_body, None assert expected_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_decode_body_expected_encoding(self, mock_codecs): """Test if decode_body() works when encoding is set correctly.""" mock_body = Mock() mock_decoder = Mock() mock_decoded_result = Mock() mock_codecs.get.return_value = mock_decoder mock_decoder.decode.return_value = mock_decoded_result result = self.my_channel.decode_body(mock_body, encoding='base64') assert mock_decoded_result == result @patch(QPID_MODULE + '.Channel.codecs') def test_decode_body_not_expected_encoding(self, mock_codecs): """Test if decode_body() works when encoding is not set correctly.""" mock_body = Mock() result = self.my_channel.decode_body(mock_body, encoding=None) assert mock_body == result def test_typeof_exchange_exists(self): """Test that typeof() finds an exchange that already exists.""" mock_exchange = Mock() mock_qpid_exchange = Mock() mock_attributes = {} mock_type = Mock() mock_attributes['type'] = mock_type mock_qpid_exchange.getAttributes.return_value = mock_attributes self.mock_broker.getExchange.return_value = mock_qpid_exchange result = self.my_channel.typeof(mock_exchange) assert mock_type is result def test_typeof_exchange_does_not_exist(self): """Test that typeof() finds an exchange that does not exists.""" mock_exchange = Mock() mock_default = Mock() self.mock_broker.getExchange.return_value = None result = self.my_channel.typeof(mock_exchange, default=mock_default) assert mock_default is result @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport__init__(object): @pytest.fixture(autouse=True) def mock_verify_runtime_environment(self, patching): self.mock_verify_runtime_environment = patching.object( Transport, 'verify_runtime_environment') @pytest.fixture(autouse=True) def mock_transport_init(self, patching): self.mock_base_Transport__init__ = patching( QPID_MODULE + '.base.Transport.__init__') def test_Transport___init___calls_verify_runtime_environment(self): Transport(Mock()) self.mock_verify_runtime_environment.assert_called_once_with() def test_transport___init___calls_parent_class___init__(self): m = Mock() Transport(m) self.mock_base_Transport__init__.assert_called_once_with(m) def test_transport___init___sets_use_async_interface_False(self): transport = Transport(Mock()) assert not transport.use_async_interface @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_drain_events(object): @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): # ^^ disable_runtime.. must be called before this fixture. 
self.transport = Transport(Mock()) self.transport.session = Mock() self.mock_queue = Mock() self.mock_message = Mock() self.mock_conn = Mock() self.mock_callback = Mock() self.mock_conn._callbacks = {self.mock_queue: self.mock_callback} def mock_next_receiver(self, timeout): time.sleep(0.3) mock_receiver = Mock() mock_receiver.source = self.mock_queue mock_receiver.fetch.return_value = self.mock_message return mock_receiver def test_socket_timeout_raised_when_all_receivers_empty(self): with patch(QPID_MODULE + '.QpidEmpty', new=QpidException): self.transport.session.next_receiver.side_effect = QpidException() with pytest.raises(socket.timeout): self.transport.drain_events(Mock()) def test_socket_timeout_raised_by_timeout(self): self.transport.session.next_receiver = self.mock_next_receiver with pytest.raises(socket.timeout): self.transport.drain_events(self.mock_conn, timeout=1) def test_timeout_returns_no_earlier_than_asked_for(self): self.transport.session.next_receiver = self.mock_next_receiver start_time = monotonic() try: self.transport.drain_events(self.mock_conn, timeout=1) except socket.timeout: pass elapsed_time_in_s = monotonic() - start_time assert elapsed_time_in_s >= 1.0 def test_callback_is_called(self): self.transport.session.next_receiver = self.mock_next_receiver try: self.transport.drain_events(self.mock_conn, timeout=1) except socket.timeout: pass self.mock_callback.assert_called_with(self.mock_message) @skip.if_python3() @skip.if_pypy() class test_Transport_create_channel(object): @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): # ^^ disable runtime MUST be called before this fixture self.transport = Transport(Mock()) self.mock_conn = Mock() self.mock_new_channel = Mock() self.mock_conn.Channel.return_value = self.mock_new_channel self.returned_channel = self.transport.create_channel(self.mock_conn) def test_new_channel_created_from_connection(self): assert self.mock_new_channel is self.returned_channel self.mock_conn.Channel.assert_called_with( self.mock_conn, self.transport, ) def test_new_channel_added_to_connection_channel_list(self): append_method = self.mock_conn.channels.append append_method.assert_called_with(self.mock_new_channel) @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_establish_connection(object): @pytest.fixture(autouse=True) def setup_self(self, disable_runtime_dependency_check): class MockClient(object): pass self.client = MockClient() self.client.connect_timeout = 4 self.client.ssl = False self.client.transport_options = {} self.client.userid = None self.client.password = None self.client.login_method = None self.transport = Transport(self.client) self.mock_conn = Mock() self.transport.Connection = self.mock_conn def test_transport_establish_conn_new_option_overwrites_default(self): self.client.userid = 'new-userid' self.client.password = 'new-password' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username=self.client.userid, password=self.client.password, sasl_mechanisms='PLAIN', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_empty_client_is_default(self): self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_additional_transport_option(self): new_param_value = 'mynewparam' self.client.transport_options['new_param']
= new_param_value self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, new_param=new_param_value, port=5672, transport='tcp', ) def test_transport_establish_conn_localhost_is_not_transformed(self): self.client.hostname = 'localhost' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_password_no_userid_raises_exception(self): self.client.password = 'somepass' with pytest.raises(Exception): self.transport.establish_connection() def test_transport_userid_no_password_raises_exception(self): self.client.userid = 'someusername' with pytest.raises(Exception): self.transport.establish_connection() def test_transport_overrides_sasl_mech_from_login_method(self): self.client.login_method = 'EXTERNAL' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='EXTERNAL', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_overrides_sasl_mech_has_username(self): self.client.userid = 'new-userid' self.client.login_method = 'EXTERNAL' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username=self.client.userid, sasl_mechanisms='EXTERNAL', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_set_password(self): self.client.userid = 'someuser' self.client.password = 'somepass' self.transport.establish_connection() self.mock_conn.assert_called_once_with( username='someuser', password='somepass', sasl_mechanisms='PLAIN', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_no_ssl_sets_transport_tcp(self): self.client.ssl = False self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='localhost', timeout=4, port=5672, transport='tcp', ) def test_transport_establish_conn_with_ssl_with_hostname_check(self): self.client.ssl = { 'keyfile': 'my_keyfile', 'certfile': 'my_certfile', 'ca_certs': 'my_cacerts', 'cert_reqs': ssl.CERT_REQUIRED, } self.transport.establish_connection() self.mock_conn.assert_called_once_with( ssl_certfile='my_certfile', ssl_trustfile='my_cacerts', timeout=4, ssl_skip_hostname_check=False, sasl_mechanisms='ANONYMOUS', host='localhost', ssl_keyfile='my_keyfile', port=5672, transport='ssl', ) def test_transport_establish_conn_with_ssl_skip_hostname_check(self): self.client.ssl = { 'keyfile': 'my_keyfile', 'certfile': 'my_certfile', 'ca_certs': 'my_cacerts', 'cert_reqs': ssl.CERT_OPTIONAL, } self.transport.establish_connection() self.mock_conn.assert_called_once_with( ssl_certfile='my_certfile', ssl_trustfile='my_cacerts', timeout=4, ssl_skip_hostname_check=True, sasl_mechanisms='ANONYMOUS', host='localhost', ssl_keyfile='my_keyfile', port=5672, transport='ssl', ) def test_transport_establish_conn_sets_client_on_connection_object(self): self.transport.establish_connection() assert self.mock_conn.return_value.client is self.client def test_transport_establish_conn_creates_session_on_transport(self): self.transport.establish_connection() qpid_conn = self.mock_conn.return_value.get_qpid_connection new_mock_session = qpid_conn.return_value.session.return_value assert self.transport.session is new_mock_session def test_transport_establish_conn_returns_new_connection_object(self): new_conn = self.transport.establish_connection() assert new_conn is
self.mock_conn.return_value def test_transport_establish_conn_uses_hostname_if_not_default(self): self.client.hostname = 'some_other_hostname' self.transport.establish_connection() self.mock_conn.assert_called_once_with( sasl_mechanisms='ANONYMOUS', host='some_other_hostname', timeout=4, port=5672, transport='tcp', ) def test_transport_sets_qpid_message_ready_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection mock_session = qpid_conn_call.return_value.session.return_value mock_set_callback = mock_session.set_message_received_notify_handler expected_msg_callback = self.transport._qpid_message_ready_handler mock_set_callback.assert_called_once_with(expected_msg_callback) def test_transport_sets_session_exception_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection mock_session = qpid_conn_call.return_value.session.return_value mock_set_callback = mock_session.set_async_exception_notify_handler exc_callback = self.transport._qpid_async_exception_notify_handler mock_set_callback.assert_called_once_with(exc_callback) def test_transport_sets_connection_exception_handler(self): self.transport.establish_connection() qpid_conn_call = self.mock_conn.return_value.get_qpid_connection qpid_conn = qpid_conn_call.return_value mock_set_callback = qpid_conn.set_async_exception_notify_handler exc_callback = self.transport._qpid_async_exception_notify_handler mock_set_callback.assert_called_once_with(exc_callback) @skip.if_python3() @skip.if_pypy() class test_Transport_class_attributes(object): def test_verify_Connection_attribute(self): assert Connection is Transport.Connection def test_verify_polling_disabled(self): assert Transport.polling_interval is None def test_transport_verify_supports_asynchronous_events(self): assert Transport.supports_ev def test_verify_driver_type_and_name(self): assert Transport.driver_type == 'qpid' assert Transport.driver_name == 'qpid' def test_transport_verify_recoverable_connection_errors(self): connection_errors = Transport.recoverable_connection_errors assert ConnectionError in connection_errors assert select.error in connection_errors def test_transport_verify_recoverable_channel_errors(self): channel_errors = Transport.recoverable_channel_errors assert NotFound in channel_errors def test_transport_verify_pre_kombu_3_0_exception_labels(self): assert (Transport.recoverable_channel_errors == Transport.channel_errors) assert (Transport.recoverable_connection_errors == Transport.connection_errors) @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_register_with_event_loop(object): def test_transport_register_with_event_loop_calls_add_reader(self): transport = Transport(Mock()) mock_connection = Mock() mock_loop = Mock() transport.register_with_event_loop(mock_connection, mock_loop) mock_loop.add_reader.assert_called_with( transport.r, transport.on_readable, mock_connection, mock_loop, ) @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_Qpid_callback_handlers_async(object): @pytest.fixture(autouse=True) def setup_self(self, patching, disable_runtime_dependency_check): self.mock_os_write = patching(QPID_MODULE + '.os.write') self.transport = Transport(Mock()) self.transport.register_with_event_loop(Mock(), Mock()) def test__qpid_message_ready_handler_writes_symbol_to_fd(self): 
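        # '0' on the event-loop pipe signals a ready message; 'e' (below) signals an async error.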
self.transport._qpid_message_ready_handler(Mock()) self.mock_os_write.assert_called_once_with(self.transport._w, '0') def test__qpid_async_exception_notify_handler_writes_symbol_to_fd(self): self.transport._qpid_async_exception_notify_handler(Mock(), Mock()) self.mock_os_write.assert_called_once_with(self.transport._w, 'e') @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_Qpid_callback_handlers_sync(object): @pytest.fixture(autouse=True) def setup(self, patching, disable_runtime_dependency_check): self.mock_os_write = patching(QPID_MODULE + '.os.write') self.transport = Transport(Mock()) def test__qpid_message_ready_handler_does_not_write(self): self.transport._qpid_message_ready_handler(Mock()) self.mock_os_write.assert_not_called() def test__qpid_async_exception_notify_handler_does_not_write(self): self.transport._qpid_async_exception_notify_handler(Mock(), Mock()) self.mock_os_write.assert_not_called() @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_on_readable(object): @pytest.fixture(autouse=True) def setup_self(self, patching, disable_runtime_dependency_check): self.mock_os_read = patching(QPID_MODULE + '.os.read') self.mock_drain_events = patching.object(Transport, 'drain_events') self.transport = Transport(Mock()) self.transport.register_with_event_loop(Mock(), Mock()) def test_transport_on_readable_reads_symbol_from_fd(self): self.transport.on_readable(Mock(), Mock()) self.mock_os_read.assert_called_once_with(self.transport.r, 1) def test_transport_on_readable_calls_drain_events(self): mock_connection = Mock() self.transport.on_readable(mock_connection, Mock()) self.mock_drain_events.assert_called_with(mock_connection) def test_transport_on_readable_catches_socket_timeout(self): self.mock_drain_events.side_effect = socket.timeout() self.transport.on_readable(Mock(), Mock()) def test_transport_on_readable_propagates_non_socket_timeout_exception(self): self.mock_drain_events.side_effect = IOError() with pytest.raises(IOError): self.transport.on_readable(Mock(), Mock()) @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport_verify_runtime_environment(object): @pytest.fixture(autouse=True) def setup_self(self, patching): self.verify_runtime_environment = Transport.verify_runtime_environment patching.object(Transport, 'verify_runtime_environment') self.transport = Transport(Mock()) @patch(QPID_MODULE + '.PY3', new=True) def test_raises_exception_for_Python3(self): with pytest.raises(RuntimeError): self.verify_runtime_environment(self.transport) @patch('__builtin__.getattr') def test_raises_exc_for_PyPy(self, mock_getattr): mock_getattr.return_value = True with pytest.raises(RuntimeError): self.verify_runtime_environment(self.transport) @patch(QPID_MODULE + '.dependency_is_none') def test_raises_exc_dep_missing(self, mock_dep_is_none): mock_dep_is_none.return_value = True with pytest.raises(RuntimeError): self.verify_runtime_environment(self.transport) @patch(QPID_MODULE + '.dependency_is_none') def test_calls_dependency_is_none(self, mock_dep_is_none): mock_dep_is_none.return_value = False self.verify_runtime_environment(self.transport) mock_dep_is_none.assert_called() def test_raises_no_exception(self): self.verify_runtime_environment(self.transport) @skip.if_python3() @skip.if_pypy() @pytest.mark.usefixtures('disable_runtime_dependency_check') class test_Transport(ExtraAssertionsMixin): def
setup(self): """Creates a mock_client to be used in testing.""" self.mock_client = Mock() def test_close_connection(self): """Test that close_connection calls close on the connection.""" my_transport = Transport(self.mock_client) mock_connection = Mock() my_transport.close_connection(mock_connection) mock_connection.close.assert_called_once_with() def test_default_connection_params(self): """Test that the default_connection_params are correct""" correct_params = { 'hostname': 'localhost', 'port': 5672, } my_transport = Transport(self.mock_client) result_params = my_transport.default_connection_params self.assertDictEqual(correct_params, result_params) @patch(QPID_MODULE + '.os.close') def test_del_sync(self, close): my_transport = Transport(self.mock_client) my_transport.__del__() close.assert_not_called() @patch(QPID_MODULE + '.os.close') def test_del_async(self, close): my_transport = Transport(self.mock_client) my_transport.register_with_event_loop(Mock(), Mock()) my_transport.__del__() close.assert_called() @patch(QPID_MODULE + '.os.close') def test_del_async_failed(self, close): close.side_effect = OSError() my_transport = Transport(self.mock_client) my_transport.register_with_event_loop(Mock(), Mock()) my_transport.__del__() close.assert_called() kombu-4.1.0/t/unit/transport/test_base.py0000644000175000017500000001301113130603207020347 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu import Connection, Consumer, Exchange, Producer, Queue from kombu.five import text_t from kombu.message import Message from kombu.transport.base import ( StdChannel, Transport, Management, to_rabbitmq_queue_arguments, ) @pytest.mark.parametrize('args,input,expected', [ ({}, {'message_ttl': 20}, {'x-message-ttl': 20000}), ({}, {'message_ttl': None}, {}), ({'foo': 'bar'}, {'expires': 30.3}, {'x-expires': 30300, 'foo': 'bar'}), ({'x-expires': 3}, {'expires': 4}, {'x-expires': 4000}), ({}, {'max_length': 10}, {'x-max-length': 10}), ({}, {'max_length_bytes': 1033}, {'x-max-length-bytes': 1033}), ({}, {'max_priority': 303}, {'x-max-priority': 303}), ]) def test_rabbitmq_queue_arguments(args, input, expected): assert to_rabbitmq_queue_arguments(args, **input) == expected class test_StdChannel: def setup(self): self.conn = Connection('memory://') self.channel = self.conn.channel() self.channel.queues.clear() self.conn.connection.state.clear() def test_Consumer(self): q = Queue('foo', Exchange('foo')) cons = self.channel.Consumer(q) assert isinstance(cons, Consumer) assert cons.channel is self.channel def test_Producer(self): prod = self.channel.Producer() assert isinstance(prod, Producer) assert prod.channel is self.channel def test_interface_get_bindings(self): with pytest.raises(NotImplementedError): StdChannel().get_bindings() def test_interface_after_reply_message_received(self): assert StdChannel().after_reply_message_received(Queue('foo')) is None class test_Message: def setup(self): self.conn = Connection('memory://') self.channel = self.conn.channel() self.message = Message(channel=self.channel, delivery_tag=313) def test_postencode(self): m = Message(text_t('FOO'), channel=self.channel, postencode='ccyzz') with pytest.raises(LookupError): m._reraise_error() m.ack() def test_ack_respects_no_ack_consumers(self): self.channel.no_ack_consumers = {'abc'} self.message.delivery_info['consumer_tag'] = 'abc' ack = self.channel.basic_ack = Mock() self.message.ack() assert self.message._state != 'ACK' ack.assert_not_called() def 
test_ack_missing_consumer_tag(self): self.channel.no_ack_consumers = {'abc'} self.message.delivery_info = {} ack = self.channel.basic_ack = Mock() self.message.ack() ack.assert_called_with(self.message.delivery_tag, multiple=False) def test_ack_not_no_ack(self): self.channel.no_ack_consumers = set() self.message.delivery_info['consumer_tag'] = 'abc' ack = self.channel.basic_ack = Mock() self.message.ack() ack.assert_called_with(self.message.delivery_tag, multiple=False) def test_ack_log_error_when_no_error(self): ack = self.message.ack = Mock() self.message.ack_log_error(Mock(), KeyError) ack.assert_called_with(multiple=False) def test_ack_log_error_when_error(self): ack = self.message.ack = Mock() ack.side_effect = KeyError('foo') logger = Mock() self.message.ack_log_error(logger, KeyError) ack.assert_called_with(multiple=False) logger.critical.assert_called() assert "Couldn't ack" in logger.critical.call_args[0][0] def test_reject_log_error_when_no_error(self): reject = self.message.reject = Mock() self.message.reject_log_error(Mock(), KeyError, requeue=True) reject.assert_called_with(requeue=True) def test_reject_log_error_when_error(self): reject = self.message.reject = Mock() reject.side_effect = KeyError('foo') logger = Mock() self.message.reject_log_error(logger, KeyError) reject.assert_called_with(requeue=False) logger.critical.assert_called() assert "Couldn't reject" in logger.critical.call_args[0][0] class test_interface: def test_establish_connection(self): with pytest.raises(NotImplementedError): Transport(None).establish_connection() def test_close_connection(self): with pytest.raises(NotImplementedError): Transport(None).close_connection(None) def test_create_channel(self): with pytest.raises(NotImplementedError): Transport(None).create_channel(None) def test_close_channel(self): with pytest.raises(NotImplementedError): Transport(None).close_channel(None) def test_drain_events(self): with pytest.raises(NotImplementedError): Transport(None).drain_events(None) def test_heartbeat_check(self): Transport(None).heartbeat_check(Mock(name='connection')) def test_driver_version(self): assert Transport(None).driver_version() def test_register_with_event_loop(self): Transport(None).register_with_event_loop( Mock(name='connection'), Mock(name='loop'), ) def test_unregister_from_event_loop(self): Transport(None).unregister_from_event_loop( Mock(name='connection'), Mock(name='loop'), ) def test_manager(self): assert Transport(None).manager class test_Management: def test_get_bindings(self): m = Management(Mock(name='transport')) with pytest.raises(NotImplementedError): m.get_bindings() kombu-4.1.0/t/unit/transport/test_zookeeper.py0000644000175000017500000000211313130603207021441 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import skip from kombu import Connection from kombu.transport import zookeeper @skip.unless_module('kazoo') class test_Channel: def setup(self): self.connection = self.create_connection() self.channel = self.connection.default_channel def create_connection(self, **kwargs): return Connection(transport=zookeeper.Transport, **kwargs) def teardown(self): self.connection.close() def test_put_puts_bytes_to_queue(self): class AssertQueue: def put(self, value, priority): assert isinstance(value, bytes) self.channel._queues['foo'] = AssertQueue() self.channel._put(queue='foo', message='bar') @pytest.mark.parametrize('input,expected', ( ('', '/'), ('/root', '/root'), ('/root/', '/root'), )) def 
test_virtual_host_normalization(self, input, expected): with self.create_connection(virtual_host=input) as conn: assert conn.default_channel._vhost == expected kombu-4.1.0/t/unit/transport/test_mongodb.py0000644000175000017500000004050313130603207021070 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import datetime import pytest from case import MagicMock, call, patch, skip from kombu import Connection from kombu.five import Empty def _create_mock_connection(url='', **kwargs): from kombu.transport import mongodb # noqa class _Channel(mongodb.Channel): # reset _fanout_queues for each instance _fanout_queues = {} collections = {} now = datetime.datetime.utcnow() def _create_client(self): mock = MagicMock(name='client') # we need new mock object for every collection def get_collection(name): try: return self.collections[name] except KeyError: mock = self.collections[name] = MagicMock( name='collection:%s' % name) return mock mock.__getitem__.side_effect = get_collection return mock def get_now(self): return self.now class Transport(mongodb.Transport): Channel = _Channel return Connection(url, transport=Transport, **kwargs) @skip.unless_module('pymongo') class test_mongodb_uri_parsing: def test_defaults(self): url = 'mongodb://' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'kombu_default' assert hostname == 'mongodb://127.0.0.1' def test_custom_host(self): url = 'mongodb://localhost' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'kombu_default' def test_custom_database(self): url = 'mongodb://localhost/dbname' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert dbname == 'dbname' def test_custom_credentials(self): url = 'mongodb://localhost/dbname' channel = _create_mock_connection( url, userid='foo', password='bar').default_channel hostname, dbname, options = channel._parse_uri() assert hostname == 'mongodb://foo:bar@localhost/dbname' assert dbname == 'dbname' def test_correct_readpreference(self): url = 'mongodb://localhost/dbname?readpreference=nearest' channel = _create_mock_connection(url).default_channel hostname, dbname, options = channel._parse_uri() assert options['readpreference'] == 'nearest' class BaseMongoDBChannelCase: def _get_method(self, cname, mname): collection = getattr(self.channel, cname) method = getattr(collection, mname.split('.', 1)[0]) for bit in mname.split('.')[1:]: method = getattr(method.return_value, bit) return method def set_operation_return_value(self, cname, mname, *values): method = self._get_method(cname, mname) if len(values) == 1: method.return_value = values[0] else: method.side_effect = values def declare_droadcast_queue(self, queue): self.channel.exchange_declare('fanout_exchange', type='fanout') self.channel._queue_bind('fanout_exchange', 'foo', '*', queue) assert queue in self.channel._broadcast_cursors def get_broadcast(self, queue): return self.channel._broadcast_cursors[queue] def set_broadcast_return_value(self, queue, *values): self.declare_droadcast_queue(queue) cursor = MagicMock(name='cursor') cursor.__iter__.return_value = iter(values) self.channel._broadcast_cursors[queue]._cursor = iter(cursor) def assert_collection_accessed(self, *collections): self.channel.client.__getitem__.assert_has_calls( [call(c) for c in collections], any_order=True) def assert_operation_has_calls(self, cname, mname, 
calls, any_order=False): method = self._get_method(cname, mname) method.assert_has_calls(calls, any_order=any_order) def assert_operation_called_with(self, cname, mname, *args, **kwargs): self.assert_operation_has_calls(cname, mname, [call(*args, **kwargs)]) @skip.unless_module('pymongo') class test_mongodb_channel(BaseMongoDBChannelCase): def setup(self): self.connection = _create_mock_connection() self.channel = self.connection.default_channel # Tests for "public" channel interface def test_new_queue(self): self.channel._new_queue('foobar') self.channel.client.assert_not_called() def test_get(self): import pymongo self.set_operation_return_value('messages', 'find_and_modify', { '_id': 'docId', 'payload': '{"some": "data"}', }) event = self.channel._get('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'find_and_modify', query={'queue': 'foobar'}, remove=True, sort=[ ('priority', pymongo.ASCENDING), ], ) assert event == {'some': 'data'} self.set_operation_return_value('messages', 'find_and_modify', None) with pytest.raises(Empty): self.channel._get('foobar') def test_get_fanout(self): self.set_broadcast_return_value('foobar', { '_id': 'docId1', 'payload': '{"some": "data"}', }) event = self.channel._get('foobar') self.assert_collection_accessed('messages.broadcast') assert event == {'some': 'data'} with pytest.raises(Empty): self.channel._get('foobar') def test_put(self): self.channel._put('foobar', {'some': 'data'}) self.assert_collection_accessed('messages') self.assert_operation_called_with('messages', 'insert', { 'queue': 'foobar', 'priority': 9, 'payload': '{"some": "data"}', }) def test_put_fanout(self): self.declare_droadcast_queue('foobar') self.channel._put_fanout('foobar', {'some': 'data'}, 'foo') self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with('broadcast', 'insert', { 'queue': 'foobar', 'payload': '{"some": "data"}', }) def test_size(self): self.set_operation_return_value('messages', 'find.count', 77) result = self.channel._size('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'find', {'queue': 'foobar'}, ) assert result == 77 def test_size_fanout(self): self.declare_droadcast_queue('foobar') cursor = MagicMock(name='cursor') cursor.get_size.return_value = 77 self.channel._broadcast_cursors['foobar'] = cursor result = self.channel._size('foobar') assert result == 77 def test_purge(self): self.set_operation_return_value('messages', 'find.count', 77) result = self.channel._purge('foobar') self.assert_collection_accessed('messages') self.assert_operation_called_with( 'messages', 'remove', {'queue': 'foobar'}, ) assert result == 77 def test_purge_fanout(self): self.declare_droadcast_queue('foobar') cursor = MagicMock(name='cursor') cursor.get_size.return_value = 77 self.channel._broadcast_cursors['foobar'] = cursor result = self.channel._purge('foobar') cursor.purge.assert_any_call() assert result == 77 def test_get_table(self): state_table = [('foo', '*', 'foo')] stored_table = [('bar', '*', 'bar')] self.channel.exchange_declare('test_exchange') self.channel.state.exchanges['test_exchange']['table'] = state_table self.set_operation_return_value('routing', 'find', [{ '_id': 'docId', 'routing_key': stored_table[0][0], 'pattern': stored_table[0][1], 'queue': stored_table[0][2], }]) result = self.channel.get_table('test_exchange') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'find', 
{'exchange': 'test_exchange'}, ) assert set(result) == frozenset(state_table) | frozenset(stored_table) def test_queue_bind(self): self.channel._queue_bind('test_exchange', 'foo', '*', 'foo') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'update', {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}, {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}, upsert=True, ) def test_queue_delete(self): self.channel.queue_delete('foobar') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'remove', {'queue': 'foobar'}, ) def test_queue_delete_fanout(self): self.declare_droadcast_queue('foobar') cursor = MagicMock(name='cursor') self.channel._broadcast_cursors['foobar'] = cursor self.channel.queue_delete('foobar') cursor.close.assert_any_call() assert 'foobar' not in self.channel._broadcast_cursors assert 'foobar' not in self.channel._fanout_queues # Tests for channel internals def test_create_broadcast(self): self.channel._create_broadcast(self.channel.client) self.channel.client.create_collection.assert_called_with( 'messages.broadcast', capped=True, size=100000, ) def test_ensure_indexes(self): self.channel._ensure_indexes(self.channel.client) self.assert_operation_called_with( 'messages', 'ensure_index', [('queue', 1), ('priority', 1), ('_id', 1)], background=True, ) self.assert_operation_called_with( 'broadcast', 'ensure_index', [('queue', 1)], ) self.assert_operation_called_with( 'routing', 'ensure_index', [('queue', 1), ('exchange', 1)], ) def test_create_broadcast_cursor(self): import pymongo with patch.object(pymongo, 'version_tuple', (2, )): self.channel._create_broadcast_cursor( 'fanout_exchange', 'foo', '*', 'foobar', ) self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with( 'broadcast', 'find', tailable=True, query={'queue': 'fanout_exchange'}, ) if pymongo.version_tuple >= (3, ): self.channel._create_broadcast_cursor( 'fanout_exchange1', 'foo', '*', 'foobar', ) self.assert_collection_accessed('messages.broadcast') self.assert_operation_called_with( 'broadcast', 'find', cursor_type=pymongo.CursorType.TAILABLE, filter={'queue': 'fanout_exchange1'}, ) @skip.unless_module('pymongo') class test_mongodb_channel_ttl(BaseMongoDBChannelCase): def setup(self): self.connection = _create_mock_connection( transport_options={'ttl': True}, ) self.channel = self.connection.default_channel self.expire_at = ( self.channel.get_now() + datetime.timedelta(milliseconds=777)) # Tests def test_new_queue(self): self.channel._new_queue('foobar') self.assert_operation_called_with( 'queues', 'update', {'_id': 'foobar'}, {'_id': 'foobar', 'options': {}, 'expire_at': None}, upsert=True, ) def test_get(self): import pymongo self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.set_operation_return_value('messages', 'find_and_modify', { '_id': 'docId', 'payload': '{"some": "data"}', }) self.channel._get('foobar') self.assert_collection_accessed('messages', 'messages.queues') self.assert_operation_called_with( 'messages', 'find_and_modify', query={'queue': 'foobar'}, remove=True, sort=[ ('priority', pymongo.ASCENDING), ], ) self.assert_operation_called_with( 'routing', 'update', {'queue': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, multiple=True, ) def test_put(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 
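            # x-message-ttl is in milliseconds; expire_at below is get_now() + 777 ms.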
'options': {'arguments': {'x-message-ttl': 777}}, }) self.channel._put('foobar', {'some': 'data'}) self.assert_collection_accessed('messages') self.assert_operation_called_with('messages', 'insert', { 'queue': 'foobar', 'priority': 9, 'payload': '{"some": "data"}', 'expire_at': self.expire_at, }) def test_queue_bind(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.channel._queue_bind('test_exchange', 'foo', '*', 'foo') self.assert_collection_accessed('messages.routing') self.assert_operation_called_with( 'routing', 'update', {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange'}, {'queue': 'foo', 'pattern': '*', 'routing_key': 'foo', 'exchange': 'test_exchange', 'expire_at': self.expire_at}, upsert=True, ) def test_queue_delete(self): self.channel.queue_delete('foobar') self.assert_collection_accessed('messages.queues') self.assert_operation_called_with( 'queues', 'remove', {'_id': 'foobar'}) def test_ensure_indexes(self): self.channel._ensure_indexes(self.channel.client) self.assert_operation_called_with( 'messages', 'ensure_index', [('expire_at', 1)], expireAfterSeconds=0) self.assert_operation_called_with( 'routing', 'ensure_index', [('expire_at', 1)], expireAfterSeconds=0) self.assert_operation_called_with( 'queues', 'ensure_index', [('expire_at', 1)], expireAfterSeconds=0) def test_get_expire(self): result = self.channel._get_expire( {'arguments': {'x-expires': 777}}, 'x-expires') self.channel.client.assert_not_called() assert result == self.expire_at self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) result = self.channel._get_expire('foobar', 'x-expires') assert result == self.expire_at def test_update_queues_expire(self): self.set_operation_return_value('queues', 'find_one', { '_id': 'docId', 'options': {'arguments': {'x-expires': 777}}, }) self.channel._update_queues_expire('foobar') self.assert_collection_accessed('messages.routing', 'messages.queues') self.assert_operation_called_with( 'routing', 'update', {'queue': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, multiple=True, ) self.assert_operation_called_with( 'queues', 'update', {'_id': 'foobar'}, {'$set': {'expire_at': self.expire_at}}, multiple=True, ) @skip.unless_module('pymongo') class test_mongodb_channel_calc_queue_size(BaseMongoDBChannelCase): def setup(self): self.connection = _create_mock_connection( transport_options={'calc_queue_size': False}) self.channel = self.connection.default_channel self.expire_at = ( self.channel.get_now() + datetime.timedelta(milliseconds=777)) # Tests def test_size(self): self.set_operation_return_value('messages', 'find.count', 77) result = self.channel._size('foobar') self.assert_operation_has_calls('messages', 'find', []) assert result == 0 kombu-4.1.0/t/unit/transport/test_etcd.py0000644000175000017500000000425513130603207020366 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, patch, skip from kombu.five import Empty from kombu.transport.etcd import Channel, Transport @skip.unless_module('etcd') class test_Etcd: def setup(self): self.connection = Mock() self.connection.client.transport_options = {} self.connection.client.port = 2739 self.client = self.patch('etcd.Client').return_value self.channel = Channel(connection=self.connection) def test_driver_version(self): assert Transport(self.connection.client).driver_version() def 
test_failed_get(self): self.channel._acquire_lock = Mock(return_value=False) self.channel.client.read.side_effect = IndexError with patch('etcd.Lock'): with pytest.raises(Empty): self.channel._get('empty')() def test_test_purge(self): with patch('etcd.Lock'): self.client.delete = Mock(return_value=True) assert self.channel._purge('foo') def test_key_prefix(self): key = self.channel._key_prefix('myqueue') assert key == 'kombu/myqueue' def test_create_delete_queue(self): queue = 'mynewqueue' with patch('etcd.Lock'): self.client.write.return_value = self.patch('etcd.EtcdResult') assert self.channel._new_queue(queue) self.client.delete.return_value = self.patch('etcd.EtcdResult') self.channel._delete(queue) def test_size(self): with patch('etcd.Lock'): self.client.read.return_value = self.patch( 'etcd.EtcdResult', _children=[{}, {}]) assert self.channel._size('q') == 2 def test_get(self): with patch('etcd.Lock'): self.client.read.return_value = self.patch( 'etcd.EtcdResult', _children=[{'key': 'myqueue', 'modifyIndex': 1, 'value': '1'}]) assert self.channel._get('myqueue') is not None def test_put(self): with patch('etcd.Lock'): self.client.write.return_value = self.patch('etcd.EtcdResult') assert self.channel._put('myqueue', 'mydata') is None kombu-4.1.0/t/unit/transport/test_consul.py0000644000175000017500000000501613130603207020746 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock, skip from kombu.five import Empty from kombu.transport.consul import Channel, Transport @skip.unless_module('consul') class test_Consul: def setup(self): self.connection = Mock() self.connection.client.transport_options = {} self.connection.client.port = 303 self.consul = self.patching('consul.Consul').return_value self.channel = Channel(connection=self.connection) def test_driver_version(self): assert Transport(self.connection.client).driver_version() def test_failed_get(self): self.channel._acquire_lock = Mock(return_value=False) self.channel.client.kv.get.return_value = (1, None) with pytest.raises(Empty): self.channel._get('empty')() def test_test_purge(self): self.channel._destroy_session = Mock(return_value=True) self.consul.kv.delete = Mock(return_value=True) assert self.channel._purge('foo') def test_variables(self): assert self.channel.session_ttl == 30 assert self.channel.timeout == '10s' def test_lock_key(self): key = self.channel._lock_key('myqueue') assert key == 'kombu/myqueue.lock' def test_key_prefix(self): key = self.channel._key_prefix('myqueue') assert key == 'kombu/myqueue' def test_get_or_create_session(self): queue = 'myqueue' session_id = '123456' self.consul.session.create.return_value = session_id assert self.channel._get_or_create_session(queue) == session_id def test_create_delete_queue(self): queue = 'mynewqueue' self.consul.kv.put.return_value = True assert self.channel._new_queue(queue) self.consul.kv.delete.return_value = True self.channel._destroy_session = Mock() self.channel._delete(queue) def test_size(self): self.consul.kv.get.return_value = [(1, {}), (2, {})] assert self.channel._size('q') == 2 def test_get(self): self.channel._obtain_lock = Mock(return_value=True) self.channel._release_lock = Mock(return_value=True) self.consul.kv.get.return_value = [1, [ {'Key': 'myqueue', 'ModifyIndex': 1, 'Value': '1'}, ]] self.consul.kv.delete.return_value = True assert self.channel._get('myqueue') is not None def test_put(self): self.consul.kv.put.return_value = True assert self.channel._put('myqueue', 'mydata') is 
None kombu-4.1.0/t/unit/test_mixins.py0000644000175000017500000001730213130603207016717 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket from case import ContextMock, Mock, patch from kombu.mixins import ConsumerMixin def Message(body, content_type='text/plain', content_encoding='utf-8'): m = Mock(name='Message') m.body = body m.content_type = content_type m.content_encoding = content_encoding return m class Cons(ConsumerMixin): def __init__(self, consumers): self.calls = Mock(name='ConsumerMixin') self.calls.get_consumers.return_value = consumers self.get_consumers = self.calls.get_consumers self.on_connection_revived = self.calls.on_connection_revived self.on_consume_ready = self.calls.on_consume_ready self.on_consume_end = self.calls.on_consume_end self.on_iteration = self.calls.on_iteration self.on_decode_error = self.calls.on_decode_error self.on_connection_error = self.calls.on_connection_error self.extra_context = ContextMock(name='extra_context') self.extra_context.return_value = self.extra_context class test_ConsumerMixin: def _context(self): Acons = ContextMock(name='consumerA') Bcons = ContextMock(name='consumerB') c = Cons([Acons, Bcons]) _conn = c.connection = ContextMock(name='connection') est = c.establish_connection = Mock(name='est_connection') est.return_value = _conn return c, Acons, Bcons def test_consume(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True) next(it) Acons.__enter__.assert_called_with() Bcons.__enter__.assert_called_with() c.extra_context.__enter__.assert_called_with() c.on_consume_ready.assert_called() c.on_iteration.assert_called_with() c.connection.drain_events.assert_called_with(timeout=1) next(it) next(it) next(it) c.should_stop = True with pytest.raises(StopIteration): next(it) def test_consume_drain_raises_socket_error(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True) c.connection.drain_events.side_effect = socket.error with pytest.raises(socket.error): next(it) def se2(*args, **kwargs): c.should_stop = True raise socket.error() c.connection.drain_events.side_effect = se2 it = c.consume(no_ack=True) with pytest.raises(StopIteration): next(it) def test_consume_drain_raises_socket_timeout(self): c, Acons, Bcons = self._context() c.should_stop = False it = c.consume(no_ack=True, timeout=1) def se(*args, **kwargs): c.should_stop = True raise socket.timeout() c.connection.drain_events.side_effect = se with pytest.raises(socket.error): next(it) def test_Consumer_context(self): c, Acons, Bcons = self._context() with c.Consumer() as (conn, channel, consumer): assert conn is c.connection assert channel is conn.default_channel c.on_connection_revived.assert_called_with() c.get_consumers.assert_called() cls = c.get_consumers.call_args[0][0] subcons = cls() assert subcons.on_decode_error is c.on_decode_error assert subcons.channel is conn.default_channel Acons.__enter__.assert_called_with() Bcons.__enter__.assert_called_with() c.on_consume_end.assert_called_with(conn, channel) class test_ConsumerMixin_interface: def setup(self): self.c = ConsumerMixin() def test_get_consumers(self): with pytest.raises(NotImplementedError): self.c.get_consumers(Mock(), Mock()) def test_on_connection_revived(self): assert self.c.on_connection_revived() is None def test_on_consume_ready(self): assert self.c.on_consume_ready(Mock(), Mock(), []) is None def test_on_consume_end(self): assert self.c.on_consume_end(Mock(), Mock()) is None def 
test_on_iteration(self): assert self.c.on_iteration() is None def test_on_decode_error(self): message = Message('foo') with patch('kombu.mixins.error') as error: self.c.on_decode_error(message, KeyError('foo')) error.assert_called() message.ack.assert_called_with() def test_on_connection_error(self): with patch('kombu.mixins.warn') as warn: self.c.on_connection_error(KeyError('foo'), 3) warn.assert_called() def test_extra_context(self): with self.c.extra_context(Mock(), Mock()): pass def test_restart_limit(self): assert self.c.restart_limit def test_connection_errors(self): conn = Mock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) assert self.c.connection_errors == conn.connection_errors conn.channel_errors = (ValueError,) assert self.c.channel_errors == conn.channel_errors def test__consume_from(self): a = ContextMock(name='A') b = ContextMock(name='B') a.__enter__ = Mock(name='A.__enter__') b.__enter__ = Mock(name='B.__enter__') with self.c._consume_from(a, b): pass a.__enter__.assert_called_with() b.__enter__.assert_called_with() def test_establish_connection(self): conn = ContextMock(name='connection') conn.clone.return_value = conn self.c.connection = conn self.c.connect_max_retries = 3 with self.c.establish_connection() as conn: assert conn conn.ensure_connection.assert_called_with( self.c.on_connection_error, 3, ) def test_maybe_conn_error(self): conn = ContextMock(name='connection') conn.connection_errors = (KeyError,) conn.channel_errors = () self.c.connection = conn def raises(): raise KeyError('foo') self.c.maybe_conn_error(raises) def test_run(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') def se(*args, **kwargs): self.c.should_stop = True return [1] self.c.should_stop = False consume.side_effect = se self.c.run() def test_run_restart_rate_limited(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') with patch('kombu.mixins.sleep') as sleep: counter = [0] def se(*args, **kwargs): if counter[0] >= 1: self.c.should_stop = True counter[0] += 1 return counter self.c.should_stop = False consume.side_effect = se self.c.run() sleep.assert_called() def test_run_raises(self): conn = ContextMock(name='connection') self.c.connection = conn conn.connection_errors = (KeyError,) conn.channel_errors = () consume = self.c.consume = Mock(name='c.consume') with patch('kombu.mixins.warn') as warn: def se_raises(*args, **kwargs): self.c.should_stop = True raise KeyError('foo') self.c.should_stop = False consume.side_effect = se_raises self.c.run() warn.assert_called() kombu-4.1.0/t/unit/test_serialization.py0000644000175000017500000002530613130603207020270 0ustar omeromer00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest import sys from base64 import b64decode from case import call, mock, patch, skip from kombu.exceptions import ContentDisallowed, EncodeError, DecodeError from kombu.five import text_t, bytes_t from kombu.serialization import ( registry, register, SerializerNotInstalled, raw_encode, register_yaml, register_msgpack, dumps, loads, pickle, pickle_protocol, unregister, register_pickle, enable_insecure_serializers, disable_insecure_serializers, ) from kombu.utils.encoding import str_to_bytes # For content_encoding 
tests unicode_string = 'abcdé\u8463' unicode_string_as_utf8 = unicode_string.encode('utf-8') latin_string = 'abcdé' latin_string_as_latin1 = latin_string.encode('latin-1') latin_string_as_utf8 = latin_string.encode('utf-8') # For serialization tests py_data = { 'string': 'The quick brown fox jumps over the lazy dog', 'int': 10, 'float': 3.14159265, 'unicode': 'Thé quick brown fox jumps over thé lazy dog', 'list': ['george', 'jerry', 'elaine', 'cosmo'], } # JSON serialization tests json_data = """\ {"int": 10, "float": 3.1415926500000002, \ "list": ["george", "jerry", "elaine", "cosmo"], \ "string": "The quick brown fox jumps over the lazy \ dog", "unicode": "Th\\u00e9 quick brown fox jumps over \ th\\u00e9 lazy dog"}\ """ # Pickle serialization tests pickle_data = pickle.dumps(py_data, protocol=pickle_protocol) # YAML serialization tests yaml_data = """\ float: 3.1415926500000002 int: 10 list: [george, jerry, elaine, cosmo] string: The quick brown fox jumps over the lazy dog unicode: "Th\\xE9 quick brown fox jumps over th\\xE9 lazy dog" """ msgpack_py_data = dict(py_data) msgpack_py_data['unicode'] = 'Th quick brown fox jumps over th lazy dog' # Unicode chars are lost in transmit :( msgpack_data = b64decode(str_to_bytes("""\ haNpbnQKpWZsb2F0y0AJIftTyNTxpGxpc3SUpmdlb3JnZaVqZXJyeaZlbGFpbmWlY29zbW+mc3Rya\ W5n2gArVGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZ6d1bmljb2Rl2g\ ApVGggcXVpY2sgYnJvd24gZm94IGp1bXBzIG92ZXIgdGggbGF6eSBkb2c=\ """)) registry.register('testS', lambda s: s, lambda s: 'decoded', 'application/testS', 'utf-8') class test_Serialization: def test_disable(self): disabled = registry._disabled_content_types try: registry.disable('testS') assert 'application/testS' in disabled disabled.clear() registry.disable('application/testS') assert 'application/testS' in disabled finally: disabled.clear() def test_enable(self): registry._disabled_content_types.add('application/json') registry.enable('json') assert 'application/json' not in registry._disabled_content_types registry._disabled_content_types.add('application/json') registry.enable('application/json') assert 'application/json' not in registry._disabled_content_types def test_loads_when_disabled(self): disabled = registry._disabled_content_types try: registry.disable('testS') with pytest.raises(SerializerNotInstalled): loads('xxd', 'application/testS', 'utf-8', force=False) ret = loads('xxd', 'application/testS', 'utf-8', force=True) assert ret == 'decoded' finally: disabled.clear() def test_loads_when_data_is_None(self): loads(None, 'application/testS', 'utf-8') def test_content_type_decoding(self): assert loads( unicode_string_as_utf8, content_type='plain/text', content_encoding='utf-8') == unicode_string assert loads( latin_string_as_latin1, content_type='application/data', content_encoding='latin-1') == latin_string def test_content_type_binary(self): assert isinstance( loads(unicode_string_as_utf8, content_type='application/data', content_encoding='binary'), bytes_t) assert loads( unicode_string_as_utf8, content_type='application/data', content_encoding='binary') == unicode_string_as_utf8 def test_content_type_encoding(self): # Using the 'raw' serializer assert (dumps(unicode_string, serializer='raw')[-1] == unicode_string_as_utf8) assert (dumps(latin_string, serializer='raw')[-1] == latin_string_as_utf8) # And again w/o a specific serializer to check the # code where we force unicode objects into a string. 
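# Illustrative sketch (not part of the original suite; the helper name is
# hypothetical): dumps() returns a (content_type, content_encoding, payload)
# triple and loads() reverses it, using the serializers registered in this
# module's imports above.
def _example_dumps_loads_roundtrip():
    content_type, encoding, payload = dumps({'hello': 'world'}, serializer='json')
    assert content_type == 'application/json'
    assert encoding == 'utf-8'
    assert loads(payload, content_type, encoding) == {'hello': 'world'}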
assert dumps(unicode_string)[-1] == unicode_string_as_utf8 assert dumps(latin_string)[-1] == latin_string_as_utf8 def test_enable_insecure_serializers(self): with patch('kombu.serialization.registry') as registry: enable_insecure_serializers() registry.assert_has_calls([ call.enable('pickle'), call.enable('yaml'), call.enable('msgpack'), ]) registry.enable.side_effect = KeyError() enable_insecure_serializers() with patch('kombu.serialization.registry') as registry: enable_insecure_serializers(['msgpack']) registry.assert_has_calls([call.enable('msgpack')]) def test_disable_insecure_serializers(self): with patch('kombu.serialization.registry') as registry: registry._decoders = ['pickle', 'yaml', 'doomsday'] disable_insecure_serializers(allowed=['doomsday']) registry.disable.assert_has_calls([call('pickle'), call('yaml')]) registry.enable.assert_has_calls([call('doomsday')]) disable_insecure_serializers(allowed=None) registry.disable.assert_has_calls([ call('pickle'), call('yaml'), call('doomsday') ]) def test_reraises_EncodeError(self): with pytest.raises(EncodeError): dumps([object()], serializer='json') def test_reraises_DecodeError(self): with pytest.raises(DecodeError): loads(object(), content_type='application/json', content_encoding='utf-8') def test_json_loads(self): assert loads(json_data, content_type='application/json', content_encoding='utf-8') == py_data def test_json_dumps(self): a = loads( dumps(py_data, serializer='json')[-1], content_type='application/json', content_encoding='utf-8', ) b = loads( json_data, content_type='application/json', content_encoding='utf-8', ) assert a == b @skip.if_pypy() @skip.unless_module('msgpack', (ImportError, ValueError)) def test_msgpack_loads(self): register_msgpack() res = loads(msgpack_data, content_type='application/x-msgpack', content_encoding='binary') if sys.version_info[0] < 3: for k, v in res.items(): if isinstance(v, text_t): res[k] = v.encode() if isinstance(v, (list, tuple)): res[k] = [i.encode() for i in v] assert res == msgpack_py_data @skip.if_pypy() @skip.unless_module('msgpack', (ImportError, ValueError)) def test_msgpack_dumps(self): register_msgpack() a = loads( dumps(msgpack_py_data, serializer='msgpack')[-1], content_type='application/x-msgpack', content_encoding='binary', ) b = loads( msgpack_data, content_type='application/x-msgpack', content_encoding='binary', ) assert a == b @skip.unless_module('yaml') def test_yaml_loads(self): register_yaml() assert loads( yaml_data, content_type='application/x-yaml', content_encoding='utf-8') == py_data @skip.unless_module('yaml') def test_yaml_dumps(self): register_yaml() a = loads( dumps(py_data, serializer='yaml')[-1], content_type='application/x-yaml', content_encoding='utf-8', ) b = loads( yaml_data, content_type='application/x-yaml', content_encoding='utf-8', ) assert a == b def test_pickle_loads(self): assert loads( pickle_data, content_type='application/x-python-serialize', content_encoding='binary') == py_data def test_pickle_dumps(self): a = pickle.loads(pickle_data), b = pickle.loads(dumps(py_data, serializer='pickle')[-1]), assert a == b def test_register(self): register(None, None, None, None) def test_unregister(self): with pytest.raises(SerializerNotInstalled): unregister('nonexisting') dumps('foo', serializer='pickle') unregister('pickle') with pytest.raises(SerializerNotInstalled): dumps('foo', serializer='pickle') register_pickle() def test_set_default_serializer_missing(self): with pytest.raises(SerializerNotInstalled): 
registry._set_default_serializer('nonexisting') def test_dumps_missing(self): with pytest.raises(SerializerNotInstalled): dumps('foo', serializer='nonexisting') def test_dumps__no_serializer(self): ctyp, cenc, data = dumps(str_to_bytes('foo')) assert ctyp == 'application/data' assert cenc == 'binary' def test_loads__trusted_content(self): loads('tainted', 'application/data', 'binary', accept=[]) loads('tainted', 'application/text', 'utf-8', accept=[]) def test_loads__not_accepted(self): with pytest.raises(ContentDisallowed): loads('tainted', 'application/x-evil', 'binary', accept=[]) with pytest.raises(ContentDisallowed): loads('tainted', 'application/x-evil', 'binary', accept=['application/x-json']) assert loads('tainted', 'application/x-doomsday', 'binary', accept=['application/x-doomsday']) def test_raw_encode(self): assert raw_encode('foo'.encode('utf-8')) == ( 'application/data', 'binary', 'foo'.encode('utf-8'), ) @mock.mask_modules('yaml') def test_register_yaml__no_yaml(self): register_yaml() with pytest.raises(SerializerNotInstalled): loads('foo', 'application/x-yaml', 'utf-8') @mock.mask_modules('msgpack') def test_register_msgpack__no_msgpack(self): register_msgpack() with pytest.raises(SerializerNotInstalled): loads('foo', 'application/x-msgpack', 'utf-8') kombu-4.1.0/t/unit/test_log.py0000644000175000017500000001124113130603207016165 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import logging import sys from case import ANY, Mock, patch from kombu.log import ( get_logger, get_loglevel, safeify_format, Log, LogMixin, setup_logging, ) class test_get_logger: def test_when_string(self): l = get_logger('foo') assert l is logging.getLogger('foo') h1 = l.handlers[0] assert isinstance(h1, logging.NullHandler) def test_when_logger(self): l = get_logger(logging.getLogger('foo')) h1 = l.handlers[0] assert isinstance(h1, logging.NullHandler) def test_with_custom_handler(self): l = logging.getLogger('bar') handler = logging.NullHandler() l.addHandler(handler) l = get_logger('bar') assert l.handlers[0] is handler def test_get_loglevel(self): assert get_loglevel('DEBUG') == logging.DEBUG assert get_loglevel('ERROR') == logging.ERROR assert get_loglevel(logging.INFO) == logging.INFO def test_safe_format(): fmt = 'The %r jumped %x over the %s' args = ['frog', 'foo', 'elephant'] res = list(safeify_format(fmt, args)) assert [x.strip('u') for x in res] == ["'frog'", 'foo', 'elephant'] class test_LogMixin: def setup(self): self.log = Log('Log', Mock()) self.logger = self.log.logger def test_debug(self): self.log.debug('debug') self.logger.log.assert_called_with(logging.DEBUG, 'Log - debug') def test_info(self): self.log.info('info') self.logger.log.assert_called_with(logging.INFO, 'Log - info') def test_warning(self): self.log.warn('warning') self.logger.log.assert_called_with(logging.WARN, 'Log - warning') def test_error(self): self.log.error('error', exc_info='exc') self.logger.log.assert_called_with( logging.ERROR, 'Log - error', exc_info='exc', ) def test_critical(self): self.log.critical('crit', exc_info='exc') self.logger.log.assert_called_with( logging.CRITICAL, 'Log - crit', exc_info='exc', ) def test_error_when_DISABLE_TRACEBACKS(self): from kombu import log log.DISABLE_TRACEBACKS = True try: self.log.error('error') self.logger.log.assert_called_with(logging.ERROR, 'Log - error') finally: log.DISABLE_TRACEBACKS = False def test_get_loglevel(self): assert self.log.get_loglevel('DEBUG') == logging.DEBUG assert self.log.get_loglevel('ERROR') == 
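# Illustrative sketch (assumed usage, mirroring the assertions in this file;
# the function name is hypothetical): Log prefixes every record with its own
# name before delegating to a standard library logger.
def _example_log_usage():
    from kombu.log import Log, get_logger
    log = Log('myapp', get_logger('myapp'))
    log.info('consumer started')  # records 'myapp - consumer started'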
logging.ERROR assert self.log.get_loglevel(logging.INFO) == logging.INFO def test_is_enabled_for(self): self.logger.isEnabledFor.return_value = True assert self.log.is_enabled_for('DEBUG') self.logger.isEnabledFor.assert_called_with(logging.DEBUG) def test_LogMixin_get_logger(self): assert LogMixin().get_logger() is logging.getLogger('LogMixin') def test_Log_get_logger(self): assert Log('test_Log').get_logger() is logging.getLogger('test_Log') def test_log_when_not_enabled(self): self.logger.isEnabledFor.return_value = False self.log.debug('debug') self.logger.log.assert_not_called() def test_log_with_format(self): self.log.debug('Host %r removed', 'example.com') self.logger.log.assert_called_with( logging.DEBUG, 'Log - Host %s removed', ANY, ) assert self.logger.log.call_args[0][2].strip('u') == "'example.com'" class test_setup_logging: @patch('logging.getLogger') def test_set_up_default_values(self, getLogger): logger = logging.getLogger.return_value = Mock() logger.handlers = [] setup_logging() logger.setLevel.assert_called_with(logging.ERROR) logger.addHandler.assert_called() ah_args, _ = logger.addHandler.call_args handler = ah_args[0] assert isinstance(handler, logging.StreamHandler) assert handler.stream is sys.__stderr__ @patch('logging.getLogger') @patch('kombu.log.WatchedFileHandler') def test_setup_custom_values(self, getLogger, WatchedFileHandler): logger = logging.getLogger.return_value = Mock() logger.handlers = [] setup_logging(loglevel=logging.DEBUG, logfile='/var/logfile') logger.setLevel.assert_called_with(logging.DEBUG) logger.addHandler.assert_called() WatchedFileHandler.assert_called() @patch('logging.getLogger') def test_logger_already_setup(self, getLogger): logger = logging.getLogger.return_value = Mock() logger.handlers = [Mock()] setup_logging() logger.setLevel.assert_not_called() kombu-4.1.0/t/unit/conftest.py0000644000175000017500000000504513130603207016177 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import atexit import os import pytest import sys from kombu.exceptions import VersionMismatch @pytest.fixture(scope='session') def multiprocessing_workaround(request): yield # Workaround for multiprocessing bug where logging # is attempted after global already collected at shutdown. 
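# Illustrative sketch (plain pytest behaviour, not a kombu API; the fixture
# name is hypothetical): the yield-fixture pattern used throughout this
# conftest runs everything before the yield as setup and everything after
# it as teardown.
@pytest.fixture
def _example_resource():
    resource = {'open': True}   # setup
    yield resource              # value handed to the test
    resource['open'] = False    # teardown, runs even if the test fails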
canceled = set() try: import multiprocessing.util canceled.add(multiprocessing.util._exit_function) except (AttributeError, ImportError): pass try: atexit._exithandlers[:] = [ e for e in atexit._exithandlers if e[0] not in canceled ] except AttributeError: # pragma: no cover pass # Py3 missing _exithandlers @pytest.fixture(autouse=True) def zzz_reset_memory_transport_state(): yield from kombu.transport import memory memory.Transport.state.clear() @pytest.fixture(autouse=True) def test_cases_has_patching(request, patching): if request.instance: request.instance.patching = patching @pytest.fixture def hub(request): from kombu.async import Hub, get_event_loop, set_event_loop _prev_hub = get_event_loop() hub = Hub() set_event_loop(hub) yield hub if _prev_hub is not None: set_event_loop(_prev_hub) def find_distribution_modules(name=__name__, file=__file__): current_dist_depth = len(name.split('.')) - 1 current_dist = os.path.join(os.path.dirname(file), *([os.pardir] * current_dist_depth)) abs = os.path.abspath(current_dist) dist_name = os.path.basename(abs) for dirpath, dirnames, filenames in os.walk(abs): package = (dist_name + dirpath[len(abs):]).replace('/', '.') if '__init__.py' in filenames: yield package for filename in filenames: if filename.endswith('.py') and filename != '__init__.py': yield '.'.join([package, filename])[:-3] def import_all_modules(name=__name__, file=__file__, skip=[]): for module in find_distribution_modules(name, file): if module not in skip: print('preimporting %r for coverage...' % (module,)) try: __import__(module) except (ImportError, VersionMismatch, AttributeError): pass def is_in_coverage(): return (os.environ.get('COVER_ALL_MODULES') or any('--cov' in arg for arg in sys.argv)) @pytest.fixture(scope='session') def cover_all_modules(): # so coverage sees all our modules. 
if is_in_coverage(): import_all_modules() kombu-4.1.0/t/unit/test_compression.py0000644000175000017500000000233513130603207017751 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import sys from case import mock, skip from kombu import compression class test_compression: @mock.mask_modules('bz2') def test_no_bz2(self): c = sys.modules.pop('kombu.compression') try: import kombu.compression assert not hasattr(kombu.compression, 'bz2') finally: if c is not None: sys.modules['kombu.compression'] = c def test_encoders__gzip(self): assert 'application/x-gzip' in compression.encoders() @skip.unless_module('bz2') def test_encoders__bz2(self): assert 'application/x-bz2' in compression.encoders() def test_compress__decompress__zlib(self): text = b'The Quick Brown Fox Jumps Over The Lazy Dog' c, ctype = compression.compress(text, 'zlib') assert text != c d = compression.decompress(c, ctype) assert d == text @skip.unless_module('bz2') def test_compress__decompress__bzip2(self): text = b'The Brown Quick Fox Over The Lazy Dog Jumps' c, ctype = compression.compress(text, 'bzip2') assert text != c d = compression.decompress(c, ctype) assert d == text kombu-4.1.0/t/unit/test_simple.py0000644000175000017500000000617413130603207016706 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu import Connection, Exchange, Queue class SimpleBase: def Queue(self, name, *args, **kwargs): q = name if not isinstance(q, Queue): q = self.__class__.__name__ if name: q = '%s.%s' % (q, name) return self._Queue(q, *args, **kwargs) def _Queue(self, *args, **kwargs): raise NotImplementedError() def setup(self): self.connection = Connection(transport='memory') self.connection.default_channel.exchange_declare('amq.direct') self.q = self.Queue(None, no_ack=True) def teardown(self): self.q.close() self.connection.close() self.connection = None self.q = None def test_produce__consume(self): q = self.Queue('test_produce__consume', no_ack=True) q.put({'hello': 'Simple'}) assert q.get(timeout=1).payload == {'hello': 'Simple'} with pytest.raises(q.Empty): q.get(timeout=0.1) def test_produce__basic_get(self): q = self.Queue('test_produce__basic_get', no_ack=True) q.put({'hello': 'SimpleSync'}) assert q.get_nowait().payload == {'hello': 'SimpleSync'} with pytest.raises(q.Empty): q.get_nowait() q.put({'hello': 'SimpleSync'}) assert q.get(block=False).payload == {'hello': 'SimpleSync'} with pytest.raises(q.Empty): q.get(block=False) def test_clear(self): q = self.Queue('test_clear', no_ack=True) for i in range(10): q.put({'hello': 'SimplePurge%d' % (i,)}) assert q.clear() == 10 def test_enter_exit(self): q = self.Queue('test_enter_exit') q.close = Mock() assert q.__enter__() is q q.__exit__() q.close.assert_called_with() def test_qsize(self): q = self.Queue('test_clear', no_ack=True) for i in range(10): q.put({'hello': 'SimplePurge%d' % (i,)}) assert q.qsize() == 10 assert len(q) == 10 def test_autoclose(self): channel = self.connection.channel() q = self.Queue('test_autoclose', no_ack=True, channel=channel) q.close() def test_custom_Queue(self): n = self.__class__.__name__ exchange = Exchange('%s-test.custom.Queue' % (n,)) queue = Queue('%s-test.custom.Queue' % (n,), exchange, 'my.routing.key') q = self.Queue(queue) assert q.consumer.queues[0] == queue q.close() def test_bool(self): q = self.Queue('test_nonzero') assert q class test_SimpleQueue(SimpleBase): def _Queue(self, *args, **kwargs): return 
self.connection.SimpleQueue(*args, **kwargs) def test_is_ack(self): q = self.Queue('test_is_ack') assert not q.no_ack class test_SimpleBuffer(SimpleBase): def Queue(self, *args, **kwargs): return self.connection.SimpleBuffer(*args, **kwargs) def test_is_no_ack(self): q = self.Queue('test_is_no_ack') assert q.no_ack kombu-4.1.0/t/unit/test_clocks.py0000644000175000017500000000440113130603207016662 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle from heapq import heappush from time import time from case import Mock from kombu.clocks import LamportClock, timetuple class test_LamportClock: def test_clocks(self): c1 = LamportClock() c2 = LamportClock() c1.forward() c2.forward() c1.forward() c1.forward() c2.adjust(c1.value) assert c2.value == c1.value + 1 assert repr(c1) c2_val = c2.value c2.forward() c2.forward() c2.adjust(c1.value) assert c2.value == c2_val + 2 + 1 c1.adjust(c2.value) assert c1.value == c2.value + 1 def test_sort(self): c = LamportClock() pid1 = 'a.example.com:312' pid2 = 'b.example.com:311' events = [] m1 = (c.forward(), pid1) heappush(events, m1) m2 = (c.forward(), pid2) heappush(events, m2) m3 = (c.forward(), pid1) heappush(events, m3) m4 = (30, pid1) heappush(events, m4) m5 = (30, pid2) heappush(events, m5) assert str(c) == str(c.value) assert c.sort_heap(events) == m1 assert c.sort_heap([m4, m5]) == m4 assert c.sort_heap([m4, m5, m1]) == m4 class test_timetuple: def test_repr(self): x = timetuple(133, time(), 'id', Mock()) assert repr(x) def test_pickleable(self): x = timetuple(133, time(), 'id', 'obj') assert pickle.loads(pickle.dumps(x)) == tuple(x) def test_order(self): t1 = time() t2 = time() + 300 # windows clock not reliable a = timetuple(133, t1, 'A', 'obj') b = timetuple(140, t1, 'A', 'obj') assert a.__getnewargs__() assert a.clock == 133 assert a.timestamp == t1 assert a.id == 'A' assert a.obj == 'obj' assert a <= b assert b >= a assert (timetuple(134, time(), 'A', 'obj').__lt__(tuple()) is NotImplemented) assert timetuple(134, t2, 'A', 'obj') > timetuple(133, t1, 'A', 'obj') assert timetuple(134, t1, 'B', 'obj') > timetuple(134, t1, 'A', 'obj') assert (timetuple(None, t2, 'B', 'obj') > timetuple(None, t1, 'A', 'obj')) kombu-4.1.0/t/unit/test_pidbox.py0000644000175000017500000002211513130603207016673 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket import warnings from case import Mock, patch from kombu import Connection from kombu import pidbox from kombu.exceptions import ContentDisallowed, InconsistencyError from kombu.utils.uuid import uuid def is_cast(message): return message['method'] def is_call(message): return message['method'] and message['reply_to'] class test_Mailbox: class Mailbox(pidbox.Mailbox): def _collect(self, *args, **kwargs): return 'COLLECTED' def setup(self): self.mailbox = self.Mailbox('test_pidbox') self.connection = Connection(transport='memory') self.state = {'var': 1} self.handlers = {'mymethod': self._handler} self.bound = self.mailbox(self.connection) self.default_chan = self.connection.channel() self.node = self.bound.Node( 'test_pidbox', state=self.state, handlers=self.handlers, channel=self.default_chan, ) def _handler(self, state): return self.state['var'] def test_publish_reply_ignores_InconsistencyError(self): mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) with patch('kombu.pidbox.Producer') as Producer: producer = Producer.return_value = Mock(name='producer') producer.publish.side_effect = 
InconsistencyError() mailbox._publish_reply( {'foo': 'bar'}, mailbox.reply_exchange, mailbox.oid, 'foo', ) producer.publish.assert_called() def test_reply__collect(self): mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) exchange = mailbox.reply_exchange.name channel = self.connection.channel() mailbox.reply_queue(channel).declare() ticket = uuid() mailbox._publish_reply({'foo': 'bar'}, exchange, mailbox.oid, ticket) _callback_called = [False] def callback(body): _callback_called[0] = True reply = mailbox._collect(ticket, limit=1, callback=callback, channel=channel) assert reply == [{'foo': 'bar'}] assert _callback_called[0] ticket = uuid() mailbox._publish_reply({'biz': 'boz'}, exchange, mailbox.oid, ticket) reply = mailbox._collect(ticket, limit=1, channel=channel) assert reply == [{'biz': 'boz'}] mailbox._publish_reply({'foo': 'BAM'}, exchange, mailbox.oid, 'doom', serializer='pickle') with pytest.raises(ContentDisallowed): reply = mailbox._collect('doom', limit=1, channel=channel) mailbox._publish_reply( {'foo': 'BAMBAM'}, exchange, mailbox.oid, 'doom', serializer='pickle', ) reply = mailbox._collect('doom', limit=1, channel=channel, accept=['pickle']) assert reply[0]['foo'] == 'BAMBAM' de = mailbox.connection.drain_events = Mock() de.side_effect = socket.timeout mailbox._collect(ticket, limit=1, channel=channel) def test_constructor(self): assert self.mailbox.connection is None assert self.mailbox.exchange.name assert self.mailbox.reply_exchange.name def test_bound(self): bound = self.mailbox(self.connection) assert bound.connection is self.connection def test_Node(self): assert self.node.hostname assert self.node.state assert self.node.mailbox is self.bound assert self.handlers # No initial handlers node2 = self.bound.Node('test_pidbox2', state=self.state) assert node2.handlers == {} def test_Node_consumer(self): consumer1 = self.node.Consumer() assert consumer1.channel is self.default_chan assert consumer1.no_ack chan2 = self.connection.channel() consumer2 = self.node.Consumer(channel=chan2, no_ack=False) assert consumer2.channel is chan2 assert not consumer2.no_ack def test_Node_consumer_multiple_listeners(self): warnings.resetwarnings() consumer = self.node.Consumer() q = consumer.queues[0] with warnings.catch_warnings(record=True) as log: q.on_declared('foo', 1, 1) assert log assert 'already using this' in log[0].message.args[0] with warnings.catch_warnings(record=True) as log: q.on_declared('foo', 1, 0) assert not log def test_handler(self): node = self.bound.Node('test_handler', state=self.state) @node.handler def my_handler_name(state): return 42 assert 'my_handler_name' in node.handlers def test_dispatch(self): node = self.bound.Node('test_dispatch', state=self.state) @node.handler def my_handler_name(state, x=None, y=None): return x + y assert node.dispatch('my_handler_name', arguments={'x': 10, 'y': 10}) == 20 def test_dispatch_raising_SystemExit(self): node = self.bound.Node('test_dispatch_raising_SystemExit', state=self.state) @node.handler def my_handler_name(state): raise SystemExit with pytest.raises(SystemExit): node.dispatch('my_handler_name') def test_dispatch_raising(self): node = self.bound.Node('test_dispatch_raising', state=self.state) @node.handler def my_handler_name(state): raise KeyError('foo') res = node.dispatch('my_handler_name') assert 'error' in res assert 'KeyError' in res['error'] def test_dispatch_replies(self): _replied = [False] def reply(data, **options): _replied[0] = True node = self.bound.Node('test_dispatch', state=self.state) 
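# Illustrative sketch (assumed usage based on the tests in this class; the
# hostname and method name are made up): a Mailbox bound to a connection
# creates Nodes that register handlers, and cast() addresses those nodes
# by hostname as a fire-and-forget broadcast.
def _example_pidbox_usage(connection):
    box = pidbox.Mailbox('example')(connection)
    node = box.Node('worker1@example.com', state={'var': 1})

    @node.handler
    def ping(state):
        return 'pong'

    box.cast(['worker1@example.com'], 'ping')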
node.reply = reply @node.handler def my_handler_name(state, x=None, y=None): return x + y node.dispatch('my_handler_name', arguments={'x': 10, 'y': 10}, reply_to={'exchange': 'foo', 'routing_key': 'bar'}) assert _replied[0] def test_reply(self): _replied = [(None, None, None)] def publish_reply(data, exchange, routing_key, ticket, **kwargs): _replied[0] = (data, exchange, routing_key, ticket) mailbox = self.mailbox(self.connection) mailbox._publish_reply = publish_reply node = mailbox.Node('test_reply') @node.handler def my_handler_name(state): return 42 node.dispatch('my_handler_name', reply_to={'exchange': 'exchange', 'routing_key': 'rkey'}, ticket='TICKET') data, exchange, routing_key, ticket = _replied[0] assert data == {'test_reply': 42} assert exchange == 'exchange' assert routing_key == 'rkey' assert ticket == 'TICKET' def test_handle_message(self): node = self.bound.Node('test_dispatch_from_message') @node.handler def my_handler_name(state, x=None, y=None): return x * y body = {'method': 'my_handler_name', 'arguments': {'x': 64, 'y': 64}} assert node.handle_message(body, None) == 64 * 64 # message not for me should not be processed. body['destination'] = ['some_other_node'] assert node.handle_message(body, None) is None def test_handle_message_adjusts_clock(self): node = self.bound.Node('test_adjusts_clock') @node.handler def my_handler_name(state): return 10 body = {'method': 'my_handler_name', 'arguments': {}} message = Mock(name='message') message.headers = {'clock': 313} node.adjust_clock = Mock(name='adjust_clock') res = node.handle_message(body, message) node.adjust_clock.assert_called_with(313) assert res == 10 def test_listen(self): consumer = self.node.listen() assert consumer.callbacks[0] == self.node.handle_message assert consumer.channel == self.default_chan def test_cast(self): self.bound.cast(['somenode'], 'mymethod') consumer = self.node.Consumer() assert is_cast(self.get_next(consumer)) def test_abcast(self): self.bound.abcast('mymethod') consumer = self.node.Consumer() assert is_cast(self.get_next(consumer)) def test_call_destination_must_be_sequence(self): with pytest.raises(ValueError): self.bound.call('some_node', 'mymethod') def test_call(self): assert self.bound.call(['some_node'], 'mymethod') == 'COLLECTED' consumer = self.node.Consumer() assert is_call(self.get_next(consumer)) def test_multi_call(self): assert self.bound.multi_call('mymethod') == 'COLLECTED' consumer = self.node.Consumer() assert is_call(self.get_next(consumer)) def get_next(self, consumer): m = consumer.queues[0].get() if m: return m.payload kombu-4.1.0/t/unit/utils/0000755000175000017500000000000013134154263015143 5ustar omeromer00000000000000kombu-4.1.0/t/unit/utils/test_uuid.py0000644000175000017500000000044713130603207017520 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from kombu.utils.uuid import uuid class test_UUID: def test_uuid4(self): assert uuid() != uuid() def test_uuid(self): i1 = uuid() i2 = uuid() assert isinstance(i1, str) assert i1 != i2 kombu-4.1.0/t/unit/utils/test_imports.py0000644000175000017500000000172413130603207020246 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu import Exchange from kombu.utils.imports import symbol_by_name class test_symbol_by_name: def test_instance_returns_instance(self): instance = object() assert symbol_by_name(instance) is instance def test_returns_default(self): default = object() assert symbol_by_name( 
'xyz.ryx.qedoa.weq:foz', default=default) is default def test_no_default(self): with pytest.raises(ImportError): symbol_by_name('xyz.ryx.qedoa.weq:foz') def test_imp_reraises_ValueError(self): imp = Mock() imp.side_effect = ValueError() with pytest.raises(ValueError): symbol_by_name('kombu.Connection', imp=imp) def test_package(self): assert symbol_by_name('.entity:Exchange', package='kombu') is Exchange assert symbol_by_name(':Consumer', package='kombu') kombu-4.1.0/t/unit/utils/test_json.py0000644000175000017500000000473613130603207017530 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import pytz from datetime import datetime from decimal import Decimal from uuid import uuid4 from case import MagicMock, Mock, skip from kombu.five import text_t from kombu.utils.encoding import str_to_bytes from kombu.utils.json import _DecodeError, dumps, loads class Custom(object): def __init__(self, data): self.data = data def __json__(self): return self.data class test_JSONEncoder: def test_datetime(self): now = datetime.utcnow() now_utc = now.replace(tzinfo=pytz.utc) stripped = datetime(*now.timetuple()[:3]) serialized = loads(dumps({ 'datetime': now, 'tz': now_utc, 'date': now.date(), 'time': now.time()}, )) assert serialized == { 'datetime': now.isoformat(), 'tz': '{0}Z'.format(now_utc.isoformat().split('+', 1)[0]), 'time': now.time().isoformat(), 'date': stripped.isoformat(), } def test_Decimal(self): d = Decimal('3314132.13363235235324234123213213214134') assert loads(dumps({'d': d})), {'d': text_t(d)} def test_UUID(self): id = uuid4() assert loads(dumps({'u': id})), {'u': text_t(id)} def test_default(self): with pytest.raises(TypeError): dumps({'o': object()}) class test_dumps_loads: def test_dumps_custom_object(self): x = {'foo': Custom({'a': 'b'})} assert loads(dumps(x)) == {'foo': x['foo'].__json__()} def test_dumps_custom_object_no_json(self): x = {'foo': object()} with pytest.raises(TypeError): dumps(x) def test_loads_memoryview(self): assert loads( memoryview(bytearray(dumps({'x': 'z'}), encoding='utf-8')) ) == {'x': 'z'} def test_loads_bytearray(self): assert loads( bytearray(dumps({'x': 'z'}), encoding='utf-8') ) == {'x': 'z'} def test_loads_bytes(self): assert loads( str_to_bytes(dumps({'x': 'z'})), decode_bytes=True) == {'x': 'z'} @skip.if_python3() def test_loads_buffer(self): assert loads(buffer(dumps({'x': 'z'}))) == {'x': 'z'} def test_loads_DecodeError(self): _loads = Mock(name='_loads') _loads.side_effect = _DecodeError( MagicMock(), MagicMock(), MagicMock()) assert loads(dumps({'x': 'z'}), _loads=_loads) == {'x': 'z'} kombu-4.1.0/t/unit/utils/__init__.py0000644000175000017500000000000013130603207017233 0ustar omeromer00000000000000kombu-4.1.0/t/unit/utils/test_functional.py0000644000175000017500000001621113130603207020710 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle import pytest from itertools import count from case import Mock, mock, skip from kombu.five import items from kombu.utils import functional as utils from kombu.utils.functional import ( ChannelPromise, LRUCache, fxrange, fxrangemax, memoize, lazy, maybe_evaluate, maybe_list, reprcall, reprkwargs, retry_over_time, ) class test_ChannelPromise: def test_repr(self): obj = Mock(name='cb') assert 'promise' in repr(ChannelPromise(obj)) obj.assert_not_called() class test_shufflecycle: def test_shuffles(self): prev_repeat, utils.repeat = utils.repeat, Mock() try: utils.repeat.return_value = list(range(10)) values = {'A', 
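# Illustrative sketch (assumed usage; the Point class is hypothetical):
# kombu's JSON helpers serialize any object exposing a __json__() method,
# exactly the protocol the Custom class in test_json.py above relies on.
def _example_json_protocol():
    from kombu.utils.json import dumps, loads

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __json__(self):
            return {'x': self.x, 'y': self.y}

    assert loads(dumps(Point(1, 2))) == {'x': 1, 'y': 2}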
'B', 'C'} cycle = utils.shufflecycle(values) seen = set() for i in range(10): next(cycle) utils.repeat.assert_called_with(None) assert seen.issubset(values) with pytest.raises(StopIteration): next(cycle) next(cycle) finally: utils.repeat = prev_repeat def double(x): return x * 2 class test_LRUCache: def test_expires(self): limit = 100 x = LRUCache(limit=limit) slots = list(range(limit * 2)) for i in slots: x[i] = i assert list(x.keys()) == list(slots[limit:]) assert x.items() assert x.values() def test_is_pickleable(self): x = LRUCache(limit=10) x.update(luke=1, leia=2) y = pickle.loads(pickle.dumps(x)) assert y.limit == x.limit assert y == x def test_update_expires(self): limit = 100 x = LRUCache(limit=limit) slots = list(range(limit * 2)) for i in slots: x.update({i: i}) assert list(x.keys()) == list(slots[limit:]) def test_least_recently_used(self): x = LRUCache(3) x[1], x[2], x[3] = 1, 2, 3 assert list(x.keys()) == [1, 2, 3] x[4], x[5] = 4, 5 assert list(x.keys()) == [3, 4, 5] # access 3, which makes it the last used key. x[3] x[6] = 6 assert list(x.keys()) == [5, 3, 6] x[7] = 7 assert list(x.keys()) == [3, 6, 7] def test_update_larger_than_cache_size(self): x = LRUCache(2) x.update({x: x for x in range(100)}) assert list(x.keys()) == [98, 99] def test_items(self): c = LRUCache() c.update(a=1, b=2, c=3) assert list(items(c)) def test_incr(self): c = LRUCache() c.update(a='1') c.incr('a') assert c['a'] == '2' def test_memoize(): counter = count(1) @memoize(maxsize=2) def x(i): return next(counter) assert x(1) == 1 assert x(1) == 1 assert x(2) == 2 assert x(3) == 3 assert x(1) == 4 x.clear() assert x(3) == 5 class test_lazy: def test__str__(self): assert (str(lazy(lambda: 'the quick brown fox')) == 'the quick brown fox') def test__repr__(self): assert repr(lazy(lambda: 'fi fa fo')).strip('u') == "'fi fa fo'" @skip.if_python3() def test__cmp__(self): assert lazy(lambda: 10).__cmp__(lazy(lambda: 20)) == -1 assert lazy(lambda: 10).__cmp__(5) == 1 def test_evaluate(self): assert lazy(lambda: 2 + 2)() == 4 assert lazy(lambda x: x * 4, 2) == 8 assert lazy(lambda x: x * 8, 2)() == 16 def test_cmp(self): assert lazy(lambda: 10) == lazy(lambda: 10) assert lazy(lambda: 10) != lazy(lambda: 20) def test__reduce__(self): x = lazy(double, 4) y = pickle.loads(pickle.dumps(x)) assert x() == y() def test__deepcopy__(self): from copy import deepcopy x = lazy(double, 4) y = deepcopy(x) assert x._fun == y._fun assert x._args == y._args assert x() == y() @pytest.mark.parametrize('obj,expected', [ (lazy(lambda: 10), 10), (20, 20), ]) def test_maybe_evaluate(obj, expected): assert maybe_evaluate(obj) == expected class test_retry_over_time: class Predicate(Exception): pass def setup(self): self.index = 0 def myfun(self): if self.index < 9: raise self.Predicate() return 42 def errback(self, exc, intervals, retries): interval = next(intervals) sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0) self.index += 1 assert interval == sleepvals[self.index] return interval @mock.sleepdeprived(module=utils) def test_simple(self): prev_count, utils.count = utils.count, Mock() try: utils.count.return_value = list(range(1)) x = retry_over_time(self.myfun, self.Predicate, errback=None, interval_max=14) assert x is None utils.count.return_value = list(range(10)) cb = Mock() x = retry_over_time(self.myfun, self.Predicate, errback=self.errback, callback=cb, interval_max=14) assert x == 42 assert self.index == 9 cb.assert_called_with() finally: utils.count = prev_count @mock.sleepdeprived(module=utils) def 
test_retry_once(self): with pytest.raises(self.Predicate): retry_over_time( self.myfun, self.Predicate, max_retries=1, errback=self.errback, interval_max=14, ) assert self.index == 1 # no errback with pytest.raises(self.Predicate): retry_over_time( self.myfun, self.Predicate, max_retries=1, errback=None, interval_max=14, ) @mock.sleepdeprived(module=utils) def test_retry_always(self): Predicate = self.Predicate class Fun(object): def __init__(self): self.calls = 0 def __call__(self, *args, **kwargs): try: if self.calls >= 10: return 42 raise Predicate() finally: self.calls += 1 fun = Fun() assert retry_over_time( fun, self.Predicate, max_retries=0, errback=None, interval_max=14) == 42 assert fun.calls == 11 @pytest.mark.parametrize('obj,expected', [ (None, None), (1, [1]), ([1, 2, 3], [1, 2, 3]), ]) def test_maybe_list(obj, expected): assert maybe_list(obj) == expected def test_fxrange__no_repeatlast(): assert list(fxrange(1.0, 3.0, 1.0)) == [1.0, 2.0, 3.0] @pytest.mark.parametrize('args,expected', [ ((1.0, 3.0, 1.0, 30.0), [1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0]), ((1.0, None, 1.0, 30.0), [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]), ]) def test_fxrangemax(args, expected): assert list(fxrangemax(*args)) == expected def test_reprkwargs(): assert reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'}) def test_reprcall(): assert reprcall('add', (2, 2), {'copy': True}) kombu-4.1.0/t/unit/utils/test_url.py0000644000175000017500000000234013130603207017346 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from kombu.utils.url import as_url, parse_url, maybe_sanitize_url def test_parse_url(): assert parse_url('amqp://user:pass@localhost:5672/my/vhost') == { 'transport': 'amqp', 'userid': 'user', 'password': 'pass', 'hostname': 'localhost', 'port': 5672, 'virtual_host': 'my/vhost', } @pytest.mark.parametrize('urltuple,expected', [ (('https',), 'https:///'), (('https', 'e.com'), 'https://e.com/'), (('https', 'e.com', 80), 'https://e.com:80/'), (('https', 'e.com', 80, 'u'), 'https://u@e.com:80/'), (('https', 'e.com', 80, 'u', 'p'), 'https://u:p@e.com:80/'), (('https', 'e.com', 80, None, 'p'), 'https://:p@e.com:80/'), (('https', 'e.com', 80, None, 'p', '/foo'), 'https://:p@e.com:80//foo'), ]) def test_as_url(urltuple, expected): assert as_url(*urltuple) == expected @pytest.mark.parametrize('url,expected', [ ('foo', 'foo'), ('http://u:p@e.com//foo', 'http://u:**@e.com//foo'), ]) def test_maybe_sanitize_url(url, expected): assert maybe_sanitize_url(url) == expected assert (maybe_sanitize_url('http://u:p@e.com//foo') == 'http://u:**@e.com//foo') kombu-4.1.0/t/unit/utils/test_div.py0000644000175000017500000000220313130603207017324 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle from io import StringIO, BytesIO from kombu.utils.div import emergency_dump_state class MyStringIO(StringIO): def close(self): pass class MyBytesIO(BytesIO): def close(self): pass class test_emergency_dump_state: def test_dump(self, stdouts): fh = MyBytesIO() stderr = StringIO() emergency_dump_state( {'foo': 'bar'}, open_file=lambda n, m: fh, stderr=stderr) assert pickle.loads(fh.getvalue()) == {'foo': 'bar'} assert stderr.getvalue() assert not stdouts.stdout.getvalue() def test_dump_second_strategy(self, stdouts): fh = MyStringIO() stderr = StringIO() def raise_something(*args, **kwargs): raise KeyError('foo') emergency_dump_state( {'foo': 'bar'}, open_file=lambda n, m: fh, dump=raise_something, stderr=stderr, ) assert 'foo' in 
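# Illustrative sketch (assumed usage matching the calls above; the fetch
# helper is hypothetical): retry_over_time() keeps invoking the callable
# while the given exception type is raised, sleeping between attempts
# (real wall-clock time unless patched, as these tests do), until it
# succeeds or max_retries is exceeded.
def _example_retry_over_time():
    attempts = [0]

    def fetch():
        attempts[0] += 1
        if attempts[0] < 3:
            raise OSError('flaky backend')
        return 'ok'

    return retry_over_time(fetch, OSError, max_retries=5, interval_max=2)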
fh.getvalue() assert 'bar' in fh.getvalue() assert stderr.getvalue() assert not stdouts.stdout.getvalue() kombu-4.1.0/t/unit/utils/test_objects.py0000644000175000017500000000214113130603207020174 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from kombu.utils.objects import cached_property class test_cached_property: def test_deleting(self): class X(object): xx = False @cached_property def foo(self): return 42 @foo.deleter # noqa def foo(self, value): self.xx = value x = X() del(x.foo) assert not x.xx x.__dict__['foo'] = 'here' del(x.foo) assert x.xx == 'here' def test_when_access_from_class(self): class X(object): xx = None @cached_property def foo(self): return 42 @foo.setter # noqa def foo(self, value): self.xx = 10 desc = X.__dict__['foo'] assert X.foo is desc assert desc.__get__(None) is desc assert desc.__set__(None, 1) is desc assert desc.__delete__(None) is desc assert desc.setter(1) x = X() x.foo = 30 assert x.xx == 10 del(x.foo) kombu-4.1.0/t/unit/utils/test_debug.py0000644000175000017500000000325413130603207017637 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import logging from case import Mock, patch from kombu.five import bytes_if_py2 from kombu.utils.debug import Logwrapped, setup_logging class test_setup_logging: def test_adds_handlers_sets_level(self): with patch('kombu.utils.debug.get_logger') as get_logger: logger = get_logger.return_value = Mock() setup_logging(loggers=['kombu.test']) get_logger.assert_called_with('kombu.test') logger.addHandler.assert_called() logger.setLevel.assert_called_with(logging.DEBUG) class test_Logwrapped: def test_wraps(self): with patch('kombu.utils.debug.get_logger') as get_logger: logger = get_logger.return_value = Mock() W = Logwrapped(Mock(), 'kombu.test') get_logger.assert_called_with('kombu.test') assert W.instance is not None assert W.logger is logger W.instance.__repr__ = lambda s: bytes_if_py2('foo') assert repr(W) == 'foo' W.instance.some_attr = 303 assert W.some_attr == 303 W.instance.some_method.__name__ = bytes_if_py2('some_method') W.some_method(1, 2, kw=1) W.instance.some_method.assert_called_with(1, 2, kw=1) W.some_method() W.instance.some_method.assert_called_with() W.some_method(kw=1) W.instance.some_method.assert_called_with(kw=1) W.ident = 'ident' W.some_method(kw=1) logger.debug.assert_called() assert 'ident' in logger.debug.call_args[0][0] assert dir(W) == dir(W.instance) kombu-4.1.0/t/unit/utils/test_scheduling.py0000644000175000017500000000533613130603207020701 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu.utils.scheduling import FairCycle, cycle_by_name class MyEmpty(Exception): pass def consume(fun, n): r = [] for i in range(n): r.append(fun(Mock(name='callback'))) return r class test_FairCycle: def test_cycle(self): resources = ['a', 'b', 'c', 'd', 'e'] callback = Mock(name='callback') def echo(r, timeout=None): return r # cycle should be ['a', 'b', 'c', 'd', 'e', ... 
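# Illustrative sketch (assumed usage, mirroring the descriptor exercised in
# test_objects.py above; _ExampleClient is hypothetical): cached_property
# computes the value on first access, stores it in the instance __dict__,
# and deleting the attribute invalidates the cache.
from kombu.utils.objects import cached_property

class _ExampleClient(object):
    @cached_property
    def connection(self):
        return object()  # expensive object, built once per instance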
repeat] cycle = FairCycle(echo, resources, MyEmpty) for i in range(len(resources)): assert cycle.get(callback) == resources[i] for i in range(len(resources)): assert cycle.get(callback) == resources[i] def test_cycle_breaks(self): resources = ['a', 'b', 'c', 'd', 'e'] def echo(r, callback): if r == 'c': raise MyEmpty(r) return r cycle = FairCycle(echo, resources, MyEmpty) assert consume(cycle.get, len(resources)) == [ 'a', 'b', 'd', 'e', 'a', ] assert consume(cycle.get, len(resources)) == [ 'b', 'd', 'e', 'a', 'b', ] cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty) with pytest.raises(MyEmpty): consume(cycle2.get, 3) def test_cycle_no_resources(self): cycle = FairCycle(None, [], MyEmpty) cycle.pos = 10 with pytest.raises(MyEmpty): cycle._next() def test__repr__(self): assert repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty)) def test_round_robin_cycle(): it = cycle_by_name('round_robin')(['A', 'B', 'C']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'C', 'B'] it.rotate('A') assert it.consume(3) == ['C', 'B', 'A'] it.rotate('A') assert it.consume(3) == ['C', 'B', 'A'] it.rotate('C') assert it.consume(3) == ['B', 'A', 'C'] def test_priority_cycle(): it = cycle_by_name('priority')(['A', 'B', 'C']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('C') assert it.consume(3) == ['A', 'B', 'C'] def test_sorted_cycle(): it = cycle_by_name('sorted')(['B', 'C', 'A']) assert it.consume(3) == ['A', 'B', 'C'] it.rotate('B') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('A') assert it.consume(3) == ['A', 'B', 'C'] it.rotate('C') assert it.consume(3) == ['A', 'B', 'C'] kombu-4.1.0/t/unit/utils/test_compat.py0000644000175000017500000000552213130603207020034 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import socket import sys import types from case import Mock, mock, patch from kombu.five import bytes_if_py2 from kombu.utils import compat from kombu.utils.compat import entrypoints, maybe_fileno class test_entrypoints: @mock.mask_modules('pkg_resources') def test_without_pkg_resources(self): assert list(entrypoints('kombu.test')) == [] @mock.module_exists('pkg_resources') def test_with_pkg_resources(self): with patch('pkg_resources.iter_entry_points', create=True) as iterep: eps = iterep.return_value = [Mock(), Mock()] assert list(entrypoints('kombu.test')) iterep.assert_called_with('kombu.test') eps[0].load.assert_called_with() eps[1].load.assert_called_with() def test_maybe_fileno(): assert maybe_fileno(3) == 3 f = Mock(name='file') assert maybe_fileno(f) is f.fileno() f.fileno.side_effect = ValueError() assert maybe_fileno(f) is None class test_detect_environment: def test_detect_environment(self): try: compat._environment = None X = compat.detect_environment() assert compat._environment == X Y = compat.detect_environment() assert Y == X finally: compat._environment = None @mock.module_exists('eventlet', 'eventlet.patcher') def test_detect_environment_eventlet(self): with patch('eventlet.patcher.is_monkey_patched', create=True) as m: assert sys.modules['eventlet'] m.return_value = True env = compat._detect_environment() m.assert_called_with(socket) assert env == 'eventlet' @mock.module_exists('gevent') def test_detect_environment_gevent(self): with patch('gevent.socket', create=True) as m: prev, socket.socket = 
socket.socket, m.socket try: assert sys.modules['gevent'] env = compat._detect_environment() assert env == 'gevent' finally: socket.socket = prev def test_detect_environment_no_eventlet_or_gevent(self): try: sys.modules['eventlet'] = types.ModuleType( bytes_if_py2('eventlet')) sys.modules['eventlet.patcher'] = types.ModuleType( bytes_if_py2('patcher')) assert compat._detect_environment() == 'default' finally: sys.modules.pop('eventlet.patcher', None) sys.modules.pop('eventlet', None) compat._detect_environment() try: sys.modules['gevent'] = types.ModuleType(bytes_if_py2('gevent')) assert compat._detect_environment() == 'default' finally: sys.modules.pop('gevent', None) compat._detect_environment() kombu-4.1.0/t/unit/utils/test_amq_manager.py0000644000175000017500000000231513130603207021016 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from case import mock, patch from kombu import Connection class test_get_manager: @mock.mask_modules('pyrabbit') def test_without_pyrabbit(self): with pytest.raises(ImportError): Connection('amqp://').get_manager() @mock.module_exists('pyrabbit') def test_with_pyrabbit(self): with patch('pyrabbit.Client', create=True) as Client: manager = Connection('amqp://').get_manager() assert manager is not None Client.assert_called_with( 'localhost:15672', 'guest', 'guest', ) @mock.module_exists('pyrabbit') def test_transport_options(self): with patch('pyrabbit.Client', create=True) as Client: manager = Connection('amqp://', transport_options={ 'manager_hostname': 'admin.mq.vandelay.com', 'manager_port': 808, 'manager_userid': 'george', 'manager_password': 'bosco', }).get_manager() assert manager is not None Client.assert_called_with( 'admin.mq.vandelay.com:808', 'george', 'bosco', ) kombu-4.1.0/t/unit/utils/test_utils.py0000644000175000017500000000116713130603207017712 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from kombu import version_info_t from kombu.utils.text import version_string_as_tuple def test_dir(): import kombu assert dir(kombu) @pytest.mark.parametrize('version,expected', [ ('3', version_info_t(3, 0, 0, '', '')), ('3.3', version_info_t(3, 3, 0, '', '')), ('3.3.1', version_info_t(3, 3, 1, '', '')), ('3.3.1a3', version_info_t(3, 3, 1, 'a3', '')), ('3.3.1.a3.40c32', version_info_t(3, 3, 1, 'a3', '40c32')), ]) def test_version_string_as_tuple(version, expected): assert version_string_as_tuple(version) == expected kombu-4.1.0/t/unit/utils/test_time.py0000644000175000017500000000074113130603207017505 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from kombu.utils.time import maybe_s_to_ms @pytest.mark.parametrize('input,expected', [ (3, 3000), (3.0, 3000), (303, 303000), (303.33, 303330), (303.333, 303333), (303.3334, 303333), (None, None), (0, 0), ]) def test_maybe_s_to_ms(input, expected): ret = maybe_s_to_ms(input) if expected is None: assert ret is None else: assert ret == expected kombu-4.1.0/t/unit/utils/test_encoding.py0000644000175000017500000000541613130603207020341 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import sys from contextlib import contextmanager from case import patch, skip from kombu.five import bytes_t, string_t from kombu.utils.encoding import ( get_default_encoding_file, safe_str, set_default_encoding_file, default_encoding, ) @contextmanager def clean_encoding(): old_encoding = 
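# Illustrative sketch (assumed usage, matching the parametrized cases in
# test_utils.py above; the helper name is hypothetical):
# version_string_as_tuple() parses a version string into the
# version_info_t namedtuple with named fields.
def _example_version_parse():
    from kombu.utils.text import version_string_as_tuple
    v = version_string_as_tuple('4.1.0')
    assert (v.major, v.minor, v.micro) == (4, 1, 0)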
sys.modules.pop('kombu.utils.encoding', None) import kombu.utils.encoding try: yield kombu.utils.encoding finally: if old_encoding: sys.modules['kombu.utils.encoding'] = old_encoding class test_default_encoding: def test_set_default_file(self): prev = get_default_encoding_file() try: set_default_encoding_file('/foo.txt') assert get_default_encoding_file() == '/foo.txt' finally: set_default_encoding_file(prev) @patch('sys.getfilesystemencoding') def test_default(self, getdefaultencoding): getdefaultencoding.return_value = 'ascii' with clean_encoding() as encoding: enc = encoding.default_encoding() if sys.platform.startswith('java'): assert enc == 'utf-8' else: assert enc == 'ascii' getdefaultencoding.assert_called_with() @skip.if_python3() def test_str_to_bytes(): with clean_encoding() as e: assert isinstance(e.str_to_bytes('foobar'), bytes_t) @skip.if_python3() def test_from_utf8(): with clean_encoding() as e: assert isinstance(e.from_utf8('foobar'), bytes_t) @skip.if_python3() def test_default_encode(): with clean_encoding() as e: assert e.default_encode(b'foo') class test_safe_str: def setup(self): self._encoding = self.patching('sys.getfilesystemencoding') self._encoding.return_value = 'ascii' def test_when_bytes(self): assert safe_str('foo') == 'foo' def test_when_unicode(self): assert isinstance(safe_str('foo'), string_t) def test_when_encoding_utf8(self): self._encoding.return_value = 'utf-8' assert default_encoding() == 'utf-8' s = 'The quiæk fåx jømps øver the lazy dåg' res = safe_str(s) assert isinstance(res, str) def test_when_containing_high_chars(self): self._encoding.return_value = 'ascii' s = 'The quiæk fåx jømps øver the lazy dåg' res = safe_str(s) assert isinstance(res, str) assert len(s) == len(res) def test_when_not_string(self): o = object() assert safe_str(o) == repr(o) def test_when_unrepresentable(self): class O(object): def __repr__(self): raise KeyError('foo') assert '<Unrepresentable' in safe_str(O()) kombu-4.1.0/t/unit/async/test_timer.py class test_Entry: def test_ordering(self): Entry(lambda x: 1) < Entry(lambda x: 2) Entry(lambda x: 1) > Entry(lambda x: 2) Entry(lambda x: 1) >= Entry(lambda x: 2) Entry(lambda x: 1) <= Entry(lambda x: 2) def test_eq(self): x = Entry(lambda x: 1) y = Entry(lambda x: 1) assert x == x assert x != y class test_Timer: def test_enter_exit(self): x = Timer() x.stop = Mock(name='timer.stop') with x: pass x.stop.assert_called_with() def test_supports_Timer_interface(self): x = Timer() x.stop() tref = Mock() x.cancel(tref) tref.cancel.assert_called_with() assert x.schedule is x def test_handle_error(self): from datetime import datetime on_error = Mock(name='on_error') s = Timer(on_error=on_error) with patch('kombu.async.timer.to_timestamp') as tot: tot.side_effect = OverflowError() s.enter_at(Entry(lambda: None, (), {}), eta=datetime.now()) s.enter_at(Entry(lambda: None, (), {}), eta=None) s.on_error = None with pytest.raises(OverflowError): s.enter_at(Entry(lambda: None, (), {}), eta=datetime.now()) on_error.assert_called_once() exc = on_error.call_args[0][0] assert isinstance(exc, OverflowError) def test_call_repeatedly(self): t = Timer() try: t.schedule.enter_after = Mock() myfun = Mock() myfun.__name__ = bytes_if_py2('myfun') t.call_repeatedly(0.03, myfun) assert t.schedule.enter_after.call_count == 1 args1, _ = t.schedule.enter_after.call_args_list[0] sec1, tref1, _ = args1 assert sec1 == 0.03 tref1() assert t.schedule.enter_after.call_count == 2 args2, _ = t.schedule.enter_after.call_args_list[1] sec2, tref2, _ = args2 assert sec2 == 0.03 tref2.canceled = True tref2() assert t.schedule.enter_after.call_count == 2 finally: t.stop() @patch('kombu.async.timer.logger') def 
test_apply_entry_error_handled(self, logger): t = Timer() t.schedule.on_error = None fun = Mock() fun.side_effect = ValueError() t.schedule.apply_entry(fun) logger.error.assert_called() def test_apply_entry_error_not_handled(self, stdouts): t = Timer() t.schedule.on_error = Mock() fun = Mock() fun.side_effect = ValueError() t.schedule.apply_entry(fun) fun.assert_called_with() assert not stdouts.stderr.getvalue() def test_enter_after(self): t = Timer() t._enter = Mock() fun = Mock(name='fun') time = Mock(name='time') time.return_value = 10 t.enter_after(10, fun, time=time) time.assert_called_with() t._enter.assert_called_with(20, 0, fun) def test_cancel(self): t = Timer() tref = Mock() t.cancel(tref) tref.cancel.assert_called_with() kombu-4.1.0/t/unit/async/test_semaphore.py0000644000175000017500000000216113130603207020505 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from kombu.async.semaphore import LaxBoundedSemaphore class test_LaxBoundedSemaphore: def test_over_release(self): x = LaxBoundedSemaphore(2) calls = [] for i in range(1, 21): x.acquire(calls.append, i) x.release() x.acquire(calls.append, 'x') x.release() x.acquire(calls.append, 'y') assert calls == [1, 2, 3, 4] for i in range(30): x.release() assert calls == list(range(1, 21)) + ['x', 'y'] assert x.value == x.initial_value calls[:] = [] for i in range(1, 11): x.acquire(calls.append, i) for i in range(1, 11): x.release() assert calls == list(range(1, 11)) calls[:] = [] assert x.value == x.initial_value x.acquire(calls.append, 'x') assert x.value == 1 x.acquire(calls.append, 'y') assert x.value == 0 x.release() assert x.value == 1 x.release() assert x.value == 2 x.release() assert x.value == 2 kombu-4.1.0/t/unit/async/aws/0000755000175000017500000000000013134154263015712 5ustar omeromer00000000000000kombu-4.1.0/t/unit/async/aws/case.py0000644000175000017500000000040413130603207017176 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest from case import skip @skip.if_pypy() @skip.unless_module('boto3') @skip.unless_module('pycurl') @pytest.mark.usefixtures('hub') class AWSCase(object): pass kombu-4.1.0/t/unit/async/aws/__init__.py0000644000175000017500000000000013130603207020002 0ustar omeromer00000000000000kombu-4.1.0/t/unit/async/aws/test_aws.py0000644000175000017500000000053413130603207020110 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from case import Mock from kombu.async.aws import connect_sqs from .case import AWSCase class test_connect_sqs(AWSCase): def test_connection(self): x = connect_sqs('AAKI', 'ASAK', http_client=Mock()) assert x assert x.sqs_connection kombu-4.1.0/t/unit/async/aws/test_connection.py0000644000175000017500000002025513130603207021457 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from contextlib import contextmanager from case import Mock from vine.abstract import Thenable from kombu.exceptions import HttpError from kombu.five import WhateverIO from kombu.async import http from kombu.async.aws.connection import ( AsyncHTTPSConnection, AsyncHTTPResponse, AsyncConnection, AsyncAWSQueryConnection, ) from kombu.async.aws.ext import boto3 from .case import AWSCase from t.mocks import PromiseMock try: from urllib.parse import urlparse, parse_qs except ImportError: from urlparse import urlparse, parse_qs # noqa # Not currently working VALIDATES_CERT = False def 
passthrough(*args, **kwargs): m = Mock(*args, **kwargs) def side_effect(ret): return ret m.side_effect = side_effect return m class test_AsyncHTTPSConnection(AWSCase): def test_http_client(self): x = AsyncHTTPSConnection() assert x.http_client is http.get_client() client = Mock(name='http_client') y = AsyncHTTPSConnection(http_client=client) assert y.http_client is client def test_args(self): x = AsyncHTTPSConnection( strict=True, timeout=33.3, ) assert x.strict assert x.timeout == 33.3 def test_request(self): x = AsyncHTTPSConnection('aws.vandelay.com') x.request('PUT', '/importer-exporter') assert x.path == '/importer-exporter' assert x.method == 'PUT' def test_request_with_body_buffer(self): x = AsyncHTTPSConnection('aws.vandelay.com') body = Mock(name='body') body.read.return_value = 'Vandelay Industries' x.request('PUT', '/importer-exporter', body) assert x.method == 'PUT' assert x.path == '/importer-exporter' assert x.body == 'Vandelay Industries' body.read.assert_called_with() def test_request_with_body_text(self): x = AsyncHTTPSConnection('aws.vandelay.com') x.request('PUT', '/importer-exporter', 'Vandelay Industries') assert x.method == 'PUT' assert x.path == '/importer-exporter' assert x.body == 'Vandelay Industries' def test_request_with_headers(self): x = AsyncHTTPSConnection() headers = {'Proxy': 'proxy.vandelay.com'} x.request('PUT', '/importer-exporter', None, headers) assert 'Proxy' in dict(x.headers) assert dict(x.headers)['Proxy'] == 'proxy.vandelay.com' def assert_request_created_with(self, url, conn): conn.Request.assert_called_with( url, method=conn.method, headers=http.Headers(conn.headers), body=conn.body, connect_timeout=conn.timeout, request_timeout=conn.timeout, validate_cert=VALIDATES_CERT, ) def test_getresponse(self): client = Mock(name='client') client.add_request = passthrough(name='client.add_request') x = AsyncHTTPSConnection(http_client=client) x.Response = Mock(name='x.Response') request = x.getresponse() x.http_client.add_request.assert_called_with(request) assert isinstance(request, Thenable) assert isinstance(request.on_ready, Thenable) response = Mock(name='Response') request.on_ready(response) x.Response.assert_called_with(response) def test_getresponse__real_response(self): client = Mock(name='client') client.add_request = passthrough(name='client.add_request') callback = PromiseMock(name='callback') x = AsyncHTTPSConnection(http_client=client) request = x.getresponse(callback) x.http_client.add_request.assert_called_with(request) buf = WhateverIO() buf.write('The quick brown fox jumps') headers = http.Headers({'X-Foo': 'Hello', 'X-Bar': 'World'}) response = http.Response(request, 200, headers, buf) request.on_ready(response) callback.assert_called() wresponse = callback.call_args[0][0] assert wresponse.read() == 'The quick brown fox jumps' assert wresponse.status == 200 assert wresponse.getheader('X-Foo') == 'Hello' headers_dict = wresponse.getheaders() assert dict(headers_dict) == headers assert wresponse.msg assert repr(wresponse) def test_repr(self): assert repr(AsyncHTTPSConnection()) def test_putrequest(self): x = AsyncHTTPSConnection() x.putrequest('UPLOAD', '/new') assert x.method == 'UPLOAD' assert x.path == '/new' def test_putheader(self): x = AsyncHTTPSConnection() x.putheader('X-Foo', 'bar') assert x.headers == [('X-Foo', 'bar')] x.putheader('X-Bar', 'baz') assert x.headers == [ ('X-Foo', 'bar'), ('X-Bar', 'baz'), ] def test_send(self): x = AsyncHTTPSConnection() x.send('foo') assert x.body == 'foo' x.send('bar') assert x.body == 
'foobar' def test_interface(self): x = AsyncHTTPSConnection() assert x.set_debuglevel(3) is None assert x.connect() is None assert x.close() is None assert x.endheaders() is None class test_AsyncHTTPResponse(AWSCase): def test_with_error(self): r = Mock(name='response') r.error = HttpError(404, 'NotFound') x = AsyncHTTPResponse(r) assert x.reason == 'NotFound' r.error = None assert not x.reason class test_AsyncConnection(AWSCase): def test_client(self): sqs = Mock(name='sqs') x = AsyncConnection(sqs) assert x._httpclient is http.get_client() client = Mock(name='client') y = AsyncConnection(sqs, http_client=client) assert y._httpclient is client def test_get_http_connection(self): sqs = Mock(name='sqs') x = AsyncConnection(sqs) assert isinstance( x.get_http_connection(), AsyncHTTPSConnection, ) conn = x.get_http_connection() assert conn.http_client is x._httpclient class test_AsyncAWSQueryConnection(AWSCase): def setup(self): session = boto3.session.Session( aws_access_key_id='AAA', aws_secret_access_key='AAAA', region_name='us-west-2', ) sqs_client = session.client('sqs') self.x = AsyncAWSQueryConnection(sqs_client, http_client=Mock(name='client')) def test_make_request(self): _mexe, self.x._mexe = self.x._mexe, Mock(name='_mexe') Conn = self.x.get_http_connection = Mock(name='get_http_connection') callback = PromiseMock(name='callback') self.x.make_request( 'action', {'foo': 1}, 'https://foo.com/', 'GET', callback=callback, ) self.x._mexe.assert_called() request = self.x._mexe.call_args[0][0] parsed = urlparse(request.url) params = parse_qs(parsed.query) assert params['Action'][0] == 'action' ret = _mexe(request, callback=callback) assert ret is callback Conn.return_value.request.assert_called() Conn.return_value.getresponse.assert_called_with( callback=callback, ) def test_make_request__no_action(self): self.x._mexe = Mock(name='_mexe') self.x.get_http_connection = Mock(name='get_http_connection') callback = PromiseMock(name='callback') self.x.make_request( None, {'foo': 1}, 'http://foo.com/', 'GET', callback=callback, ) self.x._mexe.assert_called() request = self.x._mexe.call_args[0][0] parsed = urlparse(request.url) params = parse_qs(parsed.query) assert 'Action' not in params def Response(self, status, body): r = Mock(name='response') r.status = status r.read.return_value = body return r @contextmanager def mock_make_request(self): self.x.make_request = Mock(name='make_request') callback = PromiseMock(name='callback') yield callback def assert_make_request_called(self): self.x.make_request.assert_called() return self.x.make_request.call_args[1]['callback'] kombu-4.1.0/t/unit/async/aws/sqs/0000755000175000017500000000000013134154263016520 5ustar omeromer00000000000000kombu-4.1.0/t/unit/async/aws/sqs/__init__.py0000644000175000017500000000000013130603207020610 0ustar omeromer00000000000000kombu-4.1.0/t/unit/async/aws/sqs/test_queue.py0000644000175000017500000001600513130603207021250 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest from case import Mock from kombu.async.aws.sqs.message import AsyncMessage from kombu.async.aws.sqs.queue import AsyncQueue from t.mocks import PromiseMock from ..case import AWSCase class test_AsyncQueue(AWSCase): def setup(self): self.conn = Mock(name='connection') self.x = AsyncQueue(self.conn, '/url') self.callback = PromiseMock(name='callback') def test_message_class(self): assert issubclass(self.x.message_class, AsyncMessage) def test_get_attributes(self): 
self.x.get_attributes(attributes='QueueSize', callback=self.callback) self.x.connection.get_queue_attributes.assert_called_with( self.x, 'QueueSize', self.callback, ) def test_set_attribute(self): self.x.set_attribute('key', 'value', callback=self.callback) self.x.connection.set_queue_attribute.assert_called_with( self.x, 'key', 'value', self.callback, ) def test_get_timeout(self): self.x.get_timeout(callback=self.callback) self.x.connection.get_queue_attributes.assert_called() on_ready = self.x.connection.get_queue_attributes.call_args[0][2] self.x.connection.get_queue_attributes.assert_called_with( self.x, 'VisibilityTimeout', on_ready, ) on_ready({'VisibilityTimeout': '303'}) self.callback.assert_called_with(303) def test_set_timeout(self): self.x.set_timeout(808, callback=self.callback) self.x.connection.set_queue_attribute.assert_called() on_ready = self.x.connection.set_queue_attribute.call_args[0][3] self.x.connection.set_queue_attribute.assert_called_with( self.x, 'VisibilityTimeout', 808, on_ready, ) on_ready(808) self.callback.assert_called_with(808) assert self.x.visibility_timeout == 808 on_ready(None) assert self.x.visibility_timeout == 808 def test_add_permission(self): self.x.add_permission( 'label', 'accid', 'action', callback=self.callback, ) self.x.connection.add_permission.assert_called_with( self.x, 'label', 'accid', 'action', self.callback, ) def test_remove_permission(self): self.x.remove_permission('label', callback=self.callback) self.x.connection.remove_permission.assert_called_with( self.x, 'label', self.callback, ) def test_read(self): self.x.read(visibility_timeout=909, callback=self.callback) self.x.connection.receive_message.assert_called() on_ready = self.x.connection.receive_message.call_args[1]['callback'] self.x.connection.receive_message.assert_called_with( self.x, number_messages=1, visibility_timeout=909, attributes=None, wait_time_seconds=None, callback=on_ready, ) messages = [Mock(name='message1')] on_ready(messages) self.callback.assert_called_with(messages[0]) def MockMessage(self, id, md5): m = Mock(name='Message-{0}'.format(id)) m.id = id m.md5 = md5 return m def test_write(self): message = self.MockMessage('id1', 'digest1') self.x.write(message, delay_seconds=303, callback=self.callback) self.x.connection.send_message.assert_called() on_ready = self.x.connection.send_message.call_args[1]['callback'] self.x.connection.send_message.assert_called_with( self.x, message.get_body_encoded(), 303, callback=on_ready, ) new_message = self.MockMessage('id2', 'digest2') on_ready(new_message) assert message.id == 'id2' assert message.md5 == 'digest2' def test_write_batch(self): messages = [('id1', 'A', 0), ('id2', 'B', 303)] self.x.write_batch(messages, callback=self.callback) self.x.connection.send_message_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_delete_message(self): message = self.MockMessage('id1', 'digest1') self.x.delete_message(message, callback=self.callback) self.x.connection.delete_message.assert_called_with( self.x, message, self.callback, ) def test_delete_message_batch(self): messages = [ self.MockMessage('id1', 'r1'), self.MockMessage('id2', 'r2'), ] self.x.delete_message_batch(messages, callback=self.callback) self.x.connection.delete_message_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_change_message_visibility_batch(self): messages = [ (self.MockMessage('id1', 'r1'), 303), (self.MockMessage('id2', 'r2'), 909), ] self.x.change_message_visibility_batch( messages, 
callback=self.callback, ) self.x.connection.change_message_visibility_batch.assert_called_with( self.x, messages, callback=self.callback, ) def test_delete(self): self.x.delete(callback=self.callback) self.x.connection.delete_queue.assert_called_with( self.x, callback=self.callback, ) def test_count(self): self.x.count(callback=self.callback) self.x.connection.get_queue_attributes.assert_called() on_ready = self.x.connection.get_queue_attributes.call_args[0][2] self.x.connection.get_queue_attributes.assert_called_with( self.x, 'ApproximateNumberOfMessages', on_ready, ) on_ready({'ApproximateNumberOfMessages': '909'}) self.callback.assert_called_with(909) def test_interface__count_slow(self): with pytest.raises(NotImplementedError): self.x.count_slow() def test_interface__dump(self): with pytest.raises(NotImplementedError): self.x.dump() def test_interface__save_to_file(self): with pytest.raises(NotImplementedError): self.x.save_to_file() def test_interface__save_to_filename(self): with pytest.raises(NotImplementedError): self.x.save_to_filename() def test_interface__save(self): with pytest.raises(NotImplementedError): self.x.save() def test_interface__save_to_s3(self): with pytest.raises(NotImplementedError): self.x.save_to_s3() def test_interface__load_from_s3(self): with pytest.raises(NotImplementedError): self.x.load_from_s3() def test_interface__load_from_file(self): with pytest.raises(NotImplementedError): self.x.load_from_file() def test_interface__load_from_filename(self): with pytest.raises(NotImplementedError): self.x.load_from_filename() def test_interface__load(self): with pytest.raises(NotImplementedError): self.x.load() def test_interface__clear(self): with pytest.raises(NotImplementedError): self.x.clear() kombu-4.1.0/t/unit/async/aws/sqs/test_connection.py0000644000175000017500000002620113130603207022262 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from case import Mock, MagicMock from kombu.async.aws.sqs.connection import ( AsyncSQSConnection ) from kombu.async.aws.ext import boto3 from kombu.async.aws.sqs.message import AsyncMessage from kombu.async.aws.sqs.queue import AsyncQueue from kombu.utils.uuid import uuid from t.mocks import PromiseMock from ..case import AWSCase class test_AsyncSQSConnection(AWSCase): def setup(self): session = boto3.session.Session( aws_access_key_id='AAA', aws_secret_access_key='AAAA', region_name='us-west-2', ) sqs_client = session.client('sqs') self.x = AsyncSQSConnection(sqs_client, 'ak', 'sk', http_client=Mock()) self.x.get_object = Mock(name='X.get_object') self.x.get_status = Mock(name='X.get_status') self.x.get_list = Mock(name='X.get_list') self.callback = PromiseMock(name='callback') sqs_client.get_queue_url = MagicMock(return_value={ 'QueueUrl': 'http://aws.com' }) def test_create_queue(self): self.x.create_queue('foo', callback=self.callback) self.x.get_object.assert_called_with( 'CreateQueue', {'QueueName': 'foo'}, callback=self.callback, ) def test_create_queue__with_visibility_timeout(self): self.x.create_queue( 'foo', visibility_timeout=33, callback=self.callback, ) self.x.get_object.assert_called_with( 'CreateQueue', { 'QueueName': 'foo', 'DefaultVisibilityTimeout': '33' }, callback=self.callback ) def test_delete_queue(self): queue = Mock(name='queue') self.x.delete_queue(queue, callback=self.callback) self.x.get_status.assert_called_with( 'DeleteQueue', None, queue.id, callback=self.callback, ) def test_get_queue_attributes(self): queue = Mock(name='queue') 
self.x.get_queue_attributes( queue, attribute='QueueSize', callback=self.callback, ) self.x.get_object.assert_called_with( 'GetQueueAttributes', {'AttributeName': 'QueueSize'}, queue.id, callback=self.callback, ) def test_set_queue_attribute(self): queue = Mock(name='queue') self.x.set_queue_attribute( queue, 'Expires', '3600', callback=self.callback, ) self.x.get_status.assert_called_with( 'SetQueueAttribute', { 'Attribute.Name': 'Expires', 'Attribute.Value': '3600', }, queue.id, callback=self.callback, ) def test_receive_message(self): queue = Mock(name='queue') self.x.receive_message(queue, 4, callback=self.callback) self.x.get_list.assert_called_with( 'ReceiveMessage', {'MaxNumberOfMessages': 4}, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, ) def test_receive_message__with_visibility_timeout(self): queue = Mock(name='queue') self.x.receive_message(queue, 4, 3666, callback=self.callback) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, 'VisibilityTimeout': 3666, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, ) def test_receive_message__with_wait_time_seconds(self): queue = Mock(name='queue') self.x.receive_message( queue, 4, wait_time_seconds=303, callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'MaxNumberOfMessages': 4, 'WaitTimeSeconds': 303, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, ) def test_receive_message__with_attributes(self): queue = Mock(name='queue') self.x.receive_message( queue, 4, attributes=['foo', 'bar'], callback=self.callback, ) self.x.get_list.assert_called_with( 'ReceiveMessage', { 'AttributeName.1': 'foo', 'AttributeName.2': 'bar', 'MaxNumberOfMessages': 4, }, [('Message', AsyncMessage)], 'http://aws.com', callback=self.callback, parent=queue, ) def MockMessage(self, id=None, receipt_handle=None, body=None): m = Mock(name='message') m.id = id or uuid() m.receipt_handle = receipt_handle or uuid() m._body = body def _get_body(): return m._body m.get_body.side_effect = _get_body def _set_body(value): m._body = value m.set_body.side_effect = _set_body return m def test_delete_message(self): queue = Mock(name='queue') message = self.MockMessage() self.x.delete_message(queue, message.receipt_handle, callback=self.callback) self.x.get_status.assert_called_with( 'DeleteMessage', {'ReceiptHandle': message.receipt_handle}, queue, callback=self.callback, ) def test_delete_message_batch(self): queue = Mock(name='queue') messages = [self.MockMessage('1', 'r1'), self.MockMessage('2', 'r2')] self.x.delete_message_batch(queue, messages, callback=self.callback) self.x.get_object.assert_called_with( 'DeleteMessageBatch', { 'DeleteMessageBatchRequestEntry.1.Id': '1', 'DeleteMessageBatchRequestEntry.1.ReceiptHandle': 'r1', 'DeleteMessageBatchRequestEntry.2.Id': '2', 'DeleteMessageBatchRequestEntry.2.ReceiptHandle': 'r2', }, queue.id, verb='POST', callback=self.callback, ) def test_send_message(self): queue = Mock(name='queue') self.x.send_message(queue, 'hello', callback=self.callback) self.x.get_object.assert_called_with( 'SendMessage', {'MessageBody': 'hello'}, queue.id, verb='POST', callback=self.callback, ) def test_send_message__with_delay_seconds(self): queue = Mock(name='queue') self.x.send_message( queue, 'hello', delay_seconds='303', callback=self.callback, ) self.x.get_object.assert_called_with( 'SendMessage', {'MessageBody': 'hello', 'DelaySeconds': 303}, queue.id, verb='POST', 
callback=self.callback, ) def test_send_message_batch(self): queue = Mock(name='queue') messages = [self.MockMessage('1', 'r1', 'A'), self.MockMessage('2', 'r2', 'B')] self.x.send_message_batch( queue, [(m.id, m.get_body(), 303) for m in messages], callback=self.callback ) self.x.get_object.assert_called_with( 'SendMessageBatch', { 'SendMessageBatchRequestEntry.1.Id': '1', 'SendMessageBatchRequestEntry.1.MessageBody': 'A', 'SendMessageBatchRequestEntry.1.DelaySeconds': 303, 'SendMessageBatchRequestEntry.2.Id': '2', 'SendMessageBatchRequestEntry.2.MessageBody': 'B', 'SendMessageBatchRequestEntry.2.DelaySeconds': 303, }, queue.id, verb='POST', callback=self.callback, ) def test_change_message_visibility(self): queue = Mock(name='queue') self.x.change_message_visibility( queue, 'rcpt', 33, callback=self.callback, ) self.x.get_status.assert_called_with( 'ChangeMessageVisibility', { 'ReceiptHandle': 'rcpt', 'VisibilityTimeout': 33, }, queue.id, callback=self.callback, ) def test_change_message_visibility_batch(self): queue = Mock(name='queue') messages = [ (self.MockMessage('1', 'r1'), 303), (self.MockMessage('2', 'r2'), 909), ] self.x.change_message_visibility_batch( queue, messages, callback=self.callback, ) def preamble(n): return '.'.join(['ChangeMessageVisibilityBatchRequestEntry', n]) self.x.get_object.assert_called_with( 'ChangeMessageVisibilityBatch', { preamble('1.Id'): '1', preamble('1.ReceiptHandle'): 'r1', preamble('1.VisibilityTimeout'): 303, preamble('2.Id'): '2', preamble('2.ReceiptHandle'): 'r2', preamble('2.VisibilityTimeout'): 909, }, queue.id, verb='POST', callback=self.callback, ) def test_get_all_queues(self): self.x.get_all_queues(callback=self.callback) self.x.get_list.assert_called_with( 'ListQueues', {}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def test_get_all_queues__with_prefix(self): self.x.get_all_queues(prefix='kombu.', callback=self.callback) self.x.get_list.assert_called_with( 'ListQueues', {'QueueNamePrefix': 'kombu.'}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def MockQueue(self, url): q = Mock(name='Queue') q.url = url return q def test_get_queue(self): self.x.get_queue('foo', callback=self.callback) self.x.get_list.assert_called() on_ready = self.x.get_list.call_args[1]['callback'] queues = [ self.MockQueue('/queues/bar'), self.MockQueue('/queues/baz'), self.MockQueue('/queues/foo'), ] on_ready(queues) self.callback.assert_called_with(queues[-1]) self.x.get_list.assert_called_with( 'ListQueues', {'QueueNamePrefix': 'foo'}, [('QueueUrl', AsyncQueue)], callback=on_ready, ) def test_get_dead_letter_source_queues(self): queue = Mock(name='queue') self.x.get_dead_letter_source_queues(queue, callback=self.callback) self.x.get_list.assert_called_with( 'ListDeadLetterSourceQueues', {'QueueUrl': queue.url}, [('QueueUrl', AsyncQueue)], callback=self.callback, ) def test_add_permission(self): queue = Mock(name='queue') self.x.add_permission( queue, 'label', 'accid', 'action', callback=self.callback, ) self.x.get_status.assert_called_with( 'AddPermission', { 'Label': 'label', 'AWSAccountId': 'accid', 'ActionName': 'action', }, queue.id, callback=self.callback, ) def test_remove_permission(self): queue = Mock(name='queue') self.x.remove_permission(queue, 'label', callback=self.callback) self.x.get_status.assert_called_with( 'RemovePermission', {'Label': 'label'}, queue.id, callback=self.callback, ) kombu-4.1.0/t/unit/async/__init__.py0000644000175000017500000000000013130603207017210 0ustar 
omeromer00000000000000kombu-4.1.0/t/unit/async/http/0000755000175000017500000000000013134154263016077 5ustar omeromer00000000000000kombu-4.1.0/t/unit/async/http/__init__.py0000644000175000017500000000000013130603207020167 0ustar omeromer00000000000000kombu-4.1.0/t/unit/async/http/test_http.py0000644000175000017500000001015713130603207020464 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest from io import BytesIO from vine import promise from case import Mock, skip from kombu.async import http from kombu.async.http.base import BaseClient, normalize_header from kombu.exceptions import HttpError from t.mocks import PromiseMock class test_Headers: def test_normalize(self): assert normalize_header('accept-encoding') == 'Accept-Encoding' @pytest.mark.usefixtures('hub') class test_Request: def test_init(self): x = http.Request('http://foo', method='POST') assert x.url == 'http://foo' assert x.method == 'POST' x = http.Request('x', max_redirects=100) assert x.max_redirects == 100 assert isinstance(x.headers, http.Headers) h = http.Headers() x = http.Request('x', headers=h) assert x.headers is h assert isinstance(x.on_ready, promise) def test_then(self): callback = PromiseMock(name='callback') x = http.Request('http://foo') x.then(callback) x.on_ready(1) callback.assert_called_with(1) @pytest.mark.usefixtures('hub') class test_Response: def test_init(self): req = http.Request('http://foo') r = http.Response(req, 200) assert r.status == 'OK' assert r.effective_url == 'http://foo' r.raise_for_error() def test_raise_for_error(self): req = http.Request('http://foo') r = http.Response(req, 404) assert r.status == 'Not Found' assert r.error with pytest.raises(HttpError): r.raise_for_error() def test_get_body(self): req = http.Request('http://foo') req.buffer = BytesIO() req.buffer.write(b'hello') rn = http.Response(req, 200, buffer=None) assert rn.body is None r = http.Response(req, 200, buffer=req.buffer) assert r._body is None assert r.body == b'hello' assert r._body == b'hello' assert r.body == b'hello' class test_BaseClient: @pytest.fixture(autouse=True) def setup_hub(self, hub): self.hub = hub def test_init(self): c = BaseClient(Mock(name='hub')) assert c.hub assert c._header_parser def test_perform(self): c = BaseClient(Mock(name='hub')) c.add_request = Mock(name='add_request') c.perform('http://foo') c.add_request.assert_called() assert isinstance(c.add_request.call_args[0][0], http.Request) req = http.Request('http://bar') c.perform(req) c.add_request.assert_called_with(req) def test_add_request(self): c = BaseClient(Mock(name='hub')) with pytest.raises(NotImplementedError): c.add_request(Mock(name='request')) def test_header_parser(self): c = BaseClient(Mock(name='hub')) parser = c._header_parser headers = http.Headers() c.on_header(headers, 'HTTP/1.1') c.on_header(headers, 'x-foo-bar: 123') c.on_header(headers, 'People: George Costanza') assert headers._prev_key == 'People' c.on_header(headers, ' Jerry Seinfeld') c.on_header(headers, ' Elaine Benes') c.on_header(headers, ' Cosmo Kramer') assert not headers.complete c.on_header(headers, '') assert headers.complete with pytest.raises(KeyError): parser.throw(KeyError('foo')) c.on_header(headers, '') assert headers['X-Foo-Bar'] == '123' assert (headers['People'] == 'George Costanza Jerry Seinfeld Elaine Benes Cosmo Kramer') def test_close(self): BaseClient(Mock(name='hub')).close() def test_as_context(self): c = BaseClient(Mock(name='hub')) c.close = Mock(name='close') with c: pass 
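# exiting the with-block should hand off to close(); the assertion below checks that the mocked close() was in fact called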
c.close.assert_called_with() @skip.if_pypy() @skip.unless_module('pycurl') class test_Client: def test_get_client(self, hub): client = http.get_client() assert client.hub is hub client2 = http.get_client(hub) assert client2 is client assert client2.hub is hub kombu-4.1.0/t/unit/async/http/test_curl.py0000644000175000017500000001156213130603207020453 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import pytest from case import Mock, call, patch, skip from kombu.async.http.curl import READ, WRITE, CurlClient @skip.if_pypy() @skip.unless_module('pycurl') @pytest.mark.usefixtures('hub') class test_CurlClient: class Client(CurlClient): Curl = Mock(name='Curl') def test_when_pycurl_missing(self, patching): patching('kombu.async.http.curl.pycurl', None) with pytest.raises(ImportError): self.Client() def test_max_clients_set(self): x = self.Client(max_clients=303) assert x.max_clients == 303 def test_init(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() assert x._multi is not None assert x._pending is not None assert x._free_list is not None assert x._fds is not None assert x._socket_action == x._multi.socket_action assert len(x._curls) == x.max_clients assert x._timeout_check_tref x._multi.setopt.assert_has_calls([ call(_pycurl.M_TIMERFUNCTION, x._set_timeout), call(_pycurl.M_SOCKETFUNCTION, x._handle_socket), ]) def test_close(self): with patch('kombu.async.http.curl.pycurl'): x = self.Client() x._timeout_check_tref = Mock(name='timeout_check_tref') x.close() x._timeout_check_tref.cancel.assert_called_with() for _curl in x._curls: _curl.close.assert_called_with() x._multi.close.assert_called_with() def test_add_request(self): with patch('kombu.async.http.curl.pycurl'): x = self.Client() x._process_queue = Mock(name='_process_queue') x._set_timeout = Mock(name='_set_timeout') request = Mock(name='request') x.add_request(request) assert request in x._pending x._process_queue.assert_called_with() x._set_timeout.assert_called_with(0) def test_handle_socket(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: hub = Mock(name='hub') x = self.Client(hub) fd = Mock(name='fd1') # POLL_REMOVE x._fds[fd] = fd x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl) hub.remove.assert_called_with(fd) assert fd not in x._fds x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl) # POLL_IN hub = x.hub = Mock(name='hub') fds = [fd, Mock(name='fd2'), Mock(name='fd3')] x._fds = {f: f for f in fds} x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl) hub.remove.assert_has_calls([call(fd)]) hub.add_reader.assert_called_with(fd, x.on_readable, fd) assert x._fds[fd] == READ # POLL_OUT hub = x.hub = Mock(name='hub') x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl) hub.add_writer.assert_called_with(fd, x.on_writable, fd) assert x._fds[fd] == WRITE # POLL_INOUT hub = x.hub = Mock(name='hub') x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl) hub.add_reader.assert_called_with(fd, x.on_readable, fd) hub.add_writer.assert_called_with(fd, x.on_writable, fd) assert x._fds[fd] == READ | WRITE # UNKNOWN EVENT hub = x.hub = Mock(name='hub') x._handle_socket(0xff3f, fd, x._multi, None, _pycurl) # FD NOT IN FDS hub = x.hub = Mock(name='hub') x._fds.clear() x._handle_socket(0xff3f, fd, x._multi, None, _pycurl) hub.remove.assert_not_called() def test_set_timeout(self): x = self.Client() x._set_timeout(100) def test_timeout_check(self): with 
patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() x._process_pending_requests = Mock(name='process_pending') x._multi.socket_all.return_value = 333, 1 _pycurl.error = KeyError x._timeout_check(_pycurl=_pycurl) x._multi.socket_all.return_value = None x._multi.socket_all.side_effect = _pycurl.error(333) x._timeout_check(_pycurl=_pycurl) def test_on_readable_on_writeable(self): with patch('kombu.async.http.curl.pycurl') as _pycurl: x = self.Client() x._on_event = Mock(name='on_event') fd = Mock(name='fd') x.on_readable(fd, _pycurl=_pycurl) x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN) x.on_writable(fd, _pycurl=_pycurl) x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT) kombu-4.1.0/t/unit/test_messaging.py0000644000175000017500000005717013130603207017374 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle import pytest import sys from collections import defaultdict from case import Mock, patch from kombu import Connection, Consumer, Producer, Exchange, Queue from kombu.exceptions import MessageStateError from kombu.utils import json from kombu.utils.functional import ChannelPromise from t.mocks import Transport class test_Producer: def setup(self): self.exchange = Exchange('foo', 'direct') self.connection = Connection(transport=Transport) self.connection.connect() assert self.connection.connection.connected assert not self.exchange.is_bound def test_repr(self): p = Producer(self.connection) assert repr(p) def test_pickle(self): chan = Mock() producer = Producer(chan, serializer='pickle') p2 = pickle.loads(pickle.dumps(producer)) assert p2.serializer == producer.serializer def test_no_channel(self): p = Producer(None) assert not p._channel @patch('kombu.messaging.maybe_declare') def test_maybe_declare(self, maybe_declare): p = self.connection.Producer() q = Queue('foo') p.maybe_declare(q) maybe_declare.assert_called_with(q, p.channel, False) @patch('kombu.common.maybe_declare') def test_maybe_declare_when_entity_false(self, maybe_declare): p = self.connection.Producer() p.maybe_declare(None) maybe_declare.assert_not_called() def test_auto_declare(self): channel = self.connection.channel() p = Producer(channel, self.exchange, auto_declare=True) # creates Exchange clone at bind assert p.exchange is not self.exchange assert p.exchange.is_bound # auto_declare declares exchange assert 'exchange_declare' not in channel p.publish('foo') assert 'exchange_declare' in channel def test_manual_declare(self): channel = self.connection.channel() p = Producer(channel, self.exchange, auto_declare=False) assert p.exchange.is_bound # auto_declare=False does not declare exchange assert 'exchange_declare' not in channel # p.declare() declares exchange p.declare() assert 'exchange_declare' in channel def test_prepare(self): message = {'the quick brown fox': 'jumps over the lazy dog'} channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, headers={}) assert json.loads(m) == message assert ctype == 'application/json' assert cencoding == 'utf-8' def test_prepare_compression(self): message = {'the quick brown fox': 'jumps over the lazy dog'} channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') headers = {} m, ctype, cencoding = p._prepare(message, compression='zlib', headers=headers) assert ctype == 'application/json' assert cencoding == 'utf-8' assert headers['compression'] == 'application/x-gzip' import zlib assert
json.loads(zlib.decompress(m).decode('utf-8')) == message def test_prepare_custom_content_type(self): message = 'the quick brown fox'.encode('utf-8') channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, content_type='custom') assert m == message assert ctype == 'custom' assert cencoding == 'binary' m, ctype, cencoding = p._prepare(message, content_type='custom', content_encoding='alien') assert m == message assert ctype == 'custom' assert cencoding == 'alien' def test_prepare_is_already_unicode(self): message = 'the quick brown fox' channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') m, ctype, cencoding = p._prepare(message, content_type='text/plain') assert m == message.encode('utf-8') assert ctype == 'text/plain' assert cencoding == 'utf-8' m, ctype, cencoding = p._prepare(message, content_type='text/plain', content_encoding='utf-8') assert m == message.encode('utf-8') assert ctype == 'text/plain' assert cencoding == 'utf-8' def test_publish_with_Exchange_instance(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('hello', exchange=Exchange('foo'), delivery_mode='transient') assert p._channel.basic_publish.call_args[1]['exchange'] == 'foo' def test_publish_with_expiration(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() p.publish('hello', exchange=Exchange('foo'), expiration=10) properties = p._channel.prepare_message.call_args[0][5] assert properties['expiration'] == '10000' def test_publish_with_reply_to(self): p = self.connection.Producer() p.channel = Mock() p.channel.connection.client.declared_entities = set() assert not p.exchange.name p.publish('hello', exchange=Exchange('foo'), reply_to=Queue('foo')) properties = p._channel.prepare_message.call_args[0][5] assert properties['reply_to'] == 'foo' def test_set_on_return(self): chan = Mock() chan.events = defaultdict(Mock) p = Producer(ChannelPromise(lambda: chan), on_return='on_return') p.channel chan.events['basic_return'].add.assert_called_with('on_return') def test_publish_retry_calls_ensure(self): p = Producer(Mock()) p._connection = Mock() p._connection.declared_entities = set() ensure = p.connection.ensure = Mock() p.publish('foo', exchange='foo', retry=True) ensure.assert_called() def test_publish_retry_with_declare(self): p = self.connection.Producer() p.maybe_declare = Mock() p.connection.ensure = Mock() ex = Exchange('foo') p._publish('hello', 0, '', '', {}, {}, 'rk', 0, 0, ex, declare=[ex]) p.maybe_declare.assert_called_with(ex) def test_revive_when_channel_is_connection(self): p = self.connection.Producer() p.exchange = Mock() new_conn = Connection('memory://') defchan = new_conn.default_channel p.revive(new_conn) assert p.channel is defchan p.exchange.revive.assert_called_with(defchan) def test_enter_exit(self): p = self.connection.Producer() p.release = Mock() assert p.__enter__() is p p.__exit__() p.release.assert_called_with() def test_connection_property_handles_AttributeError(self): p = self.connection.Producer() p.channel = object() p.__connection__ = None assert p.connection is None def test_publish(self): channel = self.connection.channel() p = Producer(channel, self.exchange, serializer='json') message = {'the quick brown fox': 'jumps over the lazy dog'} ret = p.publish(message, routing_key='process') assert 'prepare_message' in channel assert 'basic_publish' in 
channel m, exc, rkey = ret assert json.loads(m['body']) == message assert m['content_type'] == 'application/json' assert m['content_encoding'] == 'utf-8' assert m['priority'] == 0 assert m['properties']['delivery_mode'] == 2 assert exc == p.exchange.name assert rkey == 'process' def test_no_exchange(self): chan = self.connection.channel() p = Producer(chan) assert not p.exchange.name def test_revive(self): chan = self.connection.channel() p = Producer(chan) chan2 = self.connection.channel() p.revive(chan2) assert p.channel is chan2 assert p.exchange.channel is chan2 def test_on_return(self): chan = self.connection.channel() def on_return(exception, exchange, routing_key, message): pass p = Producer(chan, on_return=on_return) assert on_return in chan.events['basic_return'] assert p.on_return class test_Consumer: def setup(self): self.connection = Connection(transport=Transport) self.connection.connect() assert self.connection.connection.connected self.exchange = Exchange('foo', 'direct') def test_accept(self): a = Consumer(self.connection) assert a.accept is None b = Consumer(self.connection, accept=['json', 'pickle']) assert b.accept == { 'application/json', 'application/x-python-serialize', } c = Consumer(self.connection, accept=b.accept) assert b.accept == c.accept def test_enter_exit_cancel_raises(self): c = Consumer(self.connection) c.cancel = Mock(name='Consumer.cancel') c.cancel.side_effect = KeyError('foo') with c: pass c.cancel.assert_called_with() def test_enter_exit_cancel_not_called_on_connection_error(self): c = Consumer(self.connection) c.cancel = Mock(name='Consumer.cancel') assert self.connection.connection_errors with pytest.raises(self.connection.connection_errors[0]): with c: raise self.connection.connection_errors[0]() c.cancel.assert_not_called() def test_receive_callback_accept(self): message = Mock(name='Message') message.errors = [] callback = Mock(name='on_message') c = Consumer(self.connection, accept=['json'], on_message=callback) c.on_decode_error = None c.channel = Mock(name='channel') c.channel.message_to_python = None c._receive_callback(message) callback.assert_called_with(message) assert message.accept == c.accept def test_accept__content_disallowed(self): conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = conn.Producer() p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) callback = Mock(name='callback') with conn.Consumer(queues=[q], callbacks=[callback]) as consumer: with pytest.raises(consumer.ContentDisallowed): conn.drain_events(timeout=1) callback.assert_not_called() def test_accept__content_allowed(self): conn = Connection('memory://') q = Queue('foo', exchange=self.exchange) p = conn.Producer() p.publish( {'complex': object()}, declare=[q], exchange=self.exchange, serializer='pickle', ) callback = Mock(name='callback') with conn.Consumer(queues=[q], accept=['pickle'], callbacks=[callback]): conn.drain_events(timeout=1) callback.assert_called() body, message = callback.call_args[0] assert body['complex'] def test_set_no_channel(self): c = Consumer(None) assert c.channel is None c.revive(Mock()) assert c.channel def test_set_no_ack(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True, no_ack=True) assert consumer.no_ack def test_add_queue_when_auto_declare(self): consumer = self.connection.Consumer(auto_declare=True) q = Mock() q.return_value = q consumer.add_queue(q) assert q in consumer.queues 
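# with auto_declare=True, add_queue() is expected to declare the queue right away -- the next assertion verifies declare() ran on the mock queue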
q.declare.assert_called_with() def test_add_queue_when_not_auto_declare(self): consumer = self.connection.Consumer(auto_declare=False) q = Mock() q.return_value = q consumer.add_queue(q) assert q in consumer.queues assert not q.declare.call_count def test_consume_without_queues_returns(self): consumer = self.connection.Consumer() consumer.queues[:] = [] assert consumer.consume() is None def test_consuming_from(self): consumer = self.connection.Consumer() consumer.queues[:] = [Queue('a'), Queue('b'), Queue('d')] consumer._active_tags = {'a': 1, 'b': 2} assert not consumer.consuming_from(Queue('c')) assert not consumer.consuming_from('c') assert not consumer.consuming_from(Queue('d')) assert not consumer.consuming_from('d') assert consumer.consuming_from(Queue('a')) assert consumer.consuming_from(Queue('b')) assert consumer.consuming_from('b') def test_receive_callback_without_m2p(self): channel = self.connection.channel() c = channel.Consumer() m2p = getattr(channel, 'message_to_python') channel.message_to_python = None try: message = Mock() message.errors = [] message.decode.return_value = 'Hello' recv = c.receive = Mock() c._receive_callback(message) recv.assert_called_with('Hello', message) finally: channel.message_to_python = m2p def test_receive_callback__message_errors(self): channel = self.connection.channel() channel.message_to_python = None c = channel.Consumer() message = Mock() try: raise KeyError('foo') except KeyError: message.errors = [sys.exc_info()] message._reraise_error.side_effect = KeyError() with pytest.raises(KeyError): c._receive_callback(message) def test_set_callbacks(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') callbacks = [lambda x, y: x, lambda x, y: x] consumer = Consumer(channel, queue, auto_declare=True, callbacks=callbacks) assert consumer.callbacks == callbacks def test_auto_declare(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.consume() consumer.consume() # twice is a noop assert consumer.queues[0] is not queue assert consumer.queues[0].is_bound assert consumer.queues[0].exchange.is_bound assert consumer.queues[0].exchange is not self.exchange for meth in ('exchange_declare', 'queue_declare', 'queue_bind', 'basic_consume'): assert meth in channel assert channel.called.count('basic_consume') == 1 assert consumer._active_tags consumer.cancel_by_queue(queue.name) consumer.cancel_by_queue(queue.name) assert not consumer._active_tags def test_consumer_tag_prefix(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, tag_prefix='consumer_') consumer.consume() assert consumer._active_tags[queue.name].startswith('consumer_') def test_manual_declare(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=False) assert consumer.queues[0] is not queue assert consumer.queues[0].is_bound assert consumer.queues[0].exchange.is_bound assert consumer.queues[0].exchange is not self.exchange for meth in ('exchange_declare', 'queue_declare', 'basic_consume'): assert meth not in channel consumer.declare() for meth in ('exchange_declare', 'queue_declare', 'queue_bind'): assert meth in channel assert 'basic_consume' not in channel consumer.consume() assert 'basic_consume' in channel def test_consume__cancel(self): channel = self.connection.channel() queue = Queue('qname', 
self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.consume() consumer.cancel() assert 'basic_cancel' in channel assert not consumer._active_tags def test___enter____exit__(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) context = consumer.__enter__() assert context is consumer assert consumer._active_tags res = consumer.__exit__(None, None, None) assert not res assert 'basic_cancel' in channel assert not consumer._active_tags def test_flow(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.flow(False) assert 'flow' in channel def test_qos(self): channel = self.connection.channel() queue = Queue('qname', self.exchange, 'rkey') consumer = Consumer(channel, queue, auto_declare=True) consumer.qos(30, 10, False) assert 'basic_qos' in channel def test_purge(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') b2 = Queue('qname2', self.exchange, 'rkey') b3 = Queue('qname3', self.exchange, 'rkey') b4 = Queue('qname4', self.exchange, 'rkey') consumer = Consumer(channel, [b1, b2, b3, b4], auto_declare=True) consumer.purge() assert channel.called.count('queue_purge') == 4 def test_multiple_queues(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') b2 = Queue('qname2', self.exchange, 'rkey') b3 = Queue('qname3', self.exchange, 'rkey') b4 = Queue('qname4', self.exchange, 'rkey') consumer = Consumer(channel, [b1, b2, b3, b4]) consumer.consume() assert channel.called.count('exchange_declare') == 4 assert channel.called.count('queue_declare') == 4 assert channel.called.count('queue_bind') == 4 assert channel.called.count('basic_consume') == 4 assert len(consumer._active_tags) == 4 consumer.cancel() assert channel.called.count('basic_cancel') == 4 assert not len(consumer._active_tags) def test_receive_callback(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) received = [] def callback(message_data, message): received.append(message_data) message.ack() message.payload # trigger cache consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_ack' in channel assert 'message_to_python' in channel assert received[0] == {'foo': 'bar'} def test_basic_ack_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.ack() message.ack() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) def test_basic_reject(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.reject() consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject' in channel def test_basic_reject_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.reject() message.reject() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject' in channel def test_basic_reject__requeue(self): channel = self.connection.channel() b1 = 
Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.requeue() consumer.register_callback(callback) consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject:requeue' in channel def test_basic_reject__requeue_twice(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) def callback(message_data, message): message.requeue() message.requeue() consumer.register_callback(callback) with pytest.raises(MessageStateError): consumer._receive_callback({'foo': 'bar'}) assert 'basic_reject:requeue' in channel def test_receive_without_callbacks_raises(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) with pytest.raises(NotImplementedError): consumer.receive(1, 2) def test_decode_error(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) consumer.channel.throw_decode_error = True with pytest.raises(ValueError): consumer._receive_callback({'foo': 'bar'}) def test_on_decode_error_callback(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') thrown = [] def on_decode_error(msg, exc): thrown.append((msg.body, exc)) consumer = Consumer(channel, [b1], on_decode_error=on_decode_error) consumer.channel.throw_decode_error = True consumer._receive_callback({'foo': 'bar'}) assert thrown m, exc = thrown[0] assert json.loads(m) == {'foo': 'bar'} assert isinstance(exc, ValueError) def test_recover(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) consumer.recover() assert 'basic_recover' in channel def test_revive(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') consumer = Consumer(channel, [b1]) channel2 = self.connection.channel() consumer.revive(channel2) assert consumer.channel is channel2 assert consumer.queues[0].channel is channel2 assert consumer.queues[0].exchange.channel is channel2 def test_revive__with_prefetch_count(self): channel = Mock(name='channel') b1 = Queue('qname1', self.exchange, 'rkey') Consumer(channel, [b1], prefetch_count=14) channel.basic_qos.assert_called_with(0, 14, False) def test__repr__(self): channel = self.connection.channel() b1 = Queue('qname1', self.exchange, 'rkey') assert repr(Consumer(channel, [b1])) def test_connection_property_handles_AttributeError(self): p = self.connection.Consumer() p.channel = object() assert p.connection is None kombu-4.1.0/t/unit/test_common.py0000644000175000017500000003256513130603207016710 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pytest import socket from amqp import RecoverableConnectionError from case import ContextMock, Mock, patch from kombu import common from kombu.common import ( Broadcast, maybe_declare, send_reply, collect_replies, declaration_cached, ignore_errors, QoS, PREFETCH_COUNT_MAX, generate_oid ) from t.mocks import MockPool def test_generate_oid(): from uuid import NAMESPACE_OID from kombu.five import bytes_if_py2 instance = Mock() args = (1, 1001, 2001, id(instance)) ent = bytes_if_py2('%x-%x-%x-%x' % args) with patch('kombu.common.uuid3') as mock_uuid3, \ patch('kombu.common.uuid5') as mock_uuid5: mock_uuid3.side_effect = ValueError mock_uuid3.return_value = 'uuid3-6ba7b812-9dad-11d1-80b4' mock_uuid5.return_value = 'uuid5-6ba7b812-9dad-11d1-80b4' oid = 
generate_oid(1, 1001, 2001, instance) mock_uuid5.assert_called_once_with(NAMESPACE_OID, ent) assert oid == 'uuid5-6ba7b812-9dad-11d1-80b4' def test_ignore_errors(): connection = Mock() connection.channel_errors = (KeyError,) connection.connection_errors = (KeyError,) with ignore_errors(connection): raise KeyError() def raising(): raise KeyError() ignore_errors(connection, raising) connection.channel_errors = connection.connection_errors = () with pytest.raises(KeyError): with ignore_errors(connection): raise KeyError() class test_declaration_cached: def test_when_cached(self): chan = Mock() chan.connection.client.declared_entities = ['foo'] assert declaration_cached('foo', chan) def test_when_not_cached(self): chan = Mock() chan.connection.client.declared_entities = ['bar'] assert not declaration_cached('foo', chan) class test_Broadcast: def test_arguments(self): q = Broadcast(name='test_Broadcast') assert q.name.startswith('bcast.') assert q.alias == 'test_Broadcast' assert q.auto_delete assert q.exchange.name == 'test_Broadcast' assert q.exchange.type == 'fanout' q = Broadcast('test_Broadcast', 'explicit_queue_name') assert q.name == 'explicit_queue_name' assert q.exchange.name == 'test_Broadcast' q2 = q(Mock()) assert q2.name == q.name class test_maybe_declare: def test_cacheable(self): channel = Mock() client = channel.connection.client = Mock() client.declared_entities = set() entity = Mock() entity.can_cache_declaration = True entity.auto_delete = False entity.is_bound = True entity.channel = channel maybe_declare(entity, channel) assert entity.declare.call_count == 1 assert hash(entity) in channel.connection.client.declared_entities maybe_declare(entity, channel) assert entity.declare.call_count == 1 entity.channel.connection = None with pytest.raises(RecoverableConnectionError): maybe_declare(entity) def test_binds_entities(self): channel = Mock() channel.connection.client.declared_entities = set() entity = Mock() entity.can_cache_declaration = True entity.is_bound = False entity.bind.return_value = entity entity.bind.return_value.channel = channel maybe_declare(entity, channel) entity.bind.assert_called_with(channel) def test_with_retry(self): channel = Mock() client = channel.connection.client = Mock() client.declared_entities = set() entity = Mock() entity.can_cache_declaration = True entity.is_bound = True entity.channel = channel maybe_declare(entity, channel, retry=True) assert channel.connection.client.ensure.call_count class test_replies: def test_send_reply(self): req = Mock() req.content_type = 'application/json' req.content_encoding = 'binary' req.properties = {'reply_to': 'hello', 'correlation_id': 'world'} channel = Mock() exchange = Mock() exchange.is_bound = True exchange.channel = channel producer = Mock() producer.channel = channel producer.channel.connection.client.declared_entities = set() send_reply(exchange, req, {'hello': 'world'}, producer) assert producer.publish.call_count args = producer.publish.call_args assert args[0][0] == {'hello': 'world'} assert args[1] == { 'exchange': exchange, 'routing_key': 'hello', 'correlation_id': 'world', 'serializer': 'json', 'retry': False, 'retry_policy': None, 'content_encoding': 'binary', } @patch('kombu.common.itermessages') def test_collect_replies_with_ack(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() body, message = Mock(), Mock() itermessages.return_value = [(body, message)] it = collect_replies(conn, channel, queue, no_ack=False) m = next(it) assert m is body 
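# no_ack=False here, so collect_replies() should ack each reply after yielding it; the assertions below check both the no_ack flag passed on to itermessages() and the ack() call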
itermessages.assert_called_with(conn, channel, queue, no_ack=False) message.ack.assert_called_with() with pytest.raises(StopIteration): next(it) channel.after_reply_message_received.assert_called_with(queue.name) @patch('kombu.common.itermessages') def test_collect_replies_no_ack(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() body, message = Mock(), Mock() itermessages.return_value = [(body, message)] it = collect_replies(conn, channel, queue) m = next(it) assert m is body itermessages.assert_called_with(conn, channel, queue, no_ack=True) message.ack.assert_not_called() @patch('kombu.common.itermessages') def test_collect_replies_no_replies(self, itermessages): conn, channel, queue = Mock(), Mock(), Mock() itermessages.return_value = [] it = collect_replies(conn, channel, queue) with pytest.raises(StopIteration): next(it) channel.after_reply_message_received.assert_not_called() class test_insured: @patch('kombu.common.logger') def test_ensure_errback(self, logger): common._ensure_errback('foo', 30) logger.error.assert_called() def test_revive_connection(self): on_revive = Mock() channel = Mock() common.revive_connection(Mock(), channel, on_revive) on_revive.assert_called_with(channel) common.revive_connection(Mock(), channel, None) def get_insured_mocks(self, insured_returns=('works', 'ignored')): conn = ContextMock() pool = MockPool(conn) fun = Mock() insured = conn.autoretry.return_value = Mock() insured.return_value = insured_returns return conn, pool, fun, insured def test_insured(self): conn, pool, fun, insured = self.get_insured_mocks() ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'}) assert ret == 'works' conn.ensure_connection.assert_called_with( errback=common._ensure_errback, ) insured.assert_called() i_args, i_kwargs = insured.call_args assert i_args == (2, 2) assert i_kwargs == {'foo': 'bar', 'connection': conn} conn.autoretry.assert_called() ar_args, ar_kwargs = conn.autoretry.call_args assert ar_args == (fun, conn.default_channel) assert ar_kwargs.get('on_revive') assert ar_kwargs.get('errback') def test_insured_custom_errback(self): conn, pool, fun, insured = self.get_insured_mocks() custom_errback = Mock() common.insured(pool, fun, (2, 2), {'foo': 'bar'}, errback=custom_errback) conn.ensure_connection.assert_called_with(errback=custom_errback) class MockConsumer(object): consumers = set() def __init__(self, channel, queues=None, callbacks=None, **kwargs): self.channel = channel self.queues = queues self.callbacks = callbacks def __enter__(self): self.consumers.add(self) return self def __exit__(self, *exc_info): self.consumers.discard(self) class test_itermessages: class MockConnection(object): should_raise_timeout = False def drain_events(self, **kwargs): if self.should_raise_timeout: raise socket.timeout() for consumer in MockConsumer.consumers: for callback in consumer.callbacks: callback('body', 'message') def test_default(self): conn = self.MockConnection() channel = Mock() channel.connection.client = conn conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) ret = next(it) assert ret == ('body', 'message') with pytest.raises(StopIteration): next(it) def test_when_raises_socket_timeout(self): conn = self.MockConnection() conn.should_raise_timeout = True channel = Mock() channel.connection.client = conn conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) with pytest.raises(StopIteration): next(it) @patch('kombu.common.deque') def test_when_raises_IndexError(self, deque): 
deque_instance = deque.return_value = Mock() deque_instance.popleft.side_effect = IndexError() conn = self.MockConnection() channel = Mock() conn.Consumer = MockConsumer it = common.itermessages(conn, channel, 'q', limit=1) with pytest.raises(StopIteration): next(it) class test_QoS: class _QoS(QoS): def __init__(self, value): self.value = value QoS.__init__(self, None, value) def set(self, value): return value def test_qos_exceeds_16bit(self): with patch('kombu.common.logger') as logger: callback = Mock() qos = QoS(callback, 10) qos.prev = 100 # cannot use 2 ** 32 because of a bug on macOS Py2.5: # https://jira.mongodb.org/browse/PYTHON-389 qos.set(4294967296) logger.warn.assert_called() callback.assert_called_with(prefetch_count=0) def test_qos_increment_decrement(self): qos = self._QoS(10) assert qos.increment_eventually() == 11 assert qos.increment_eventually(3) == 14 assert qos.increment_eventually(-30) == 14 assert qos.decrement_eventually(7) == 7 assert qos.decrement_eventually() == 6 def test_qos_disabled_increment_decrement(self): qos = self._QoS(0) assert qos.increment_eventually() == 0 assert qos.increment_eventually(3) == 0 assert qos.increment_eventually(-30) == 0 assert qos.decrement_eventually(7) == 0 assert qos.decrement_eventually() == 0 assert qos.decrement_eventually(10) == 0 def test_qos_thread_safe(self): qos = self._QoS(10) def add(): for i in range(1000): qos.increment_eventually() def sub(): for i in range(1000): qos.decrement_eventually() def threaded(funs): from threading import Thread threads = [Thread(target=fun) for fun in funs] for thread in threads: thread.start() for thread in threads: thread.join() threaded([add, add]) assert qos.value == 2010 qos.value = 1000 threaded([add, sub]) # n = 2 assert qos.value == 1000 def test_exceeds_short(self): qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1) qos.update() assert qos.value == PREFETCH_COUNT_MAX - 1 qos.increment_eventually() assert qos.value == PREFETCH_COUNT_MAX qos.increment_eventually() assert qos.value == PREFETCH_COUNT_MAX + 1 qos.decrement_eventually() assert qos.value == PREFETCH_COUNT_MAX qos.decrement_eventually() assert qos.value == PREFETCH_COUNT_MAX - 1 def test_consumer_increment_decrement(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.update() assert qos.value == 10 mconsumer.qos.assert_called_with(prefetch_count=10) qos.decrement_eventually() qos.update() assert qos.value == 9 mconsumer.qos.assert_called_with(prefetch_count=9) qos.decrement_eventually() assert qos.value == 8 mconsumer.qos.assert_called_with(prefetch_count=9) assert {'prefetch_count': 9} in mconsumer.qos.call_args # Does not decrement 0 value qos.value = 0 qos.decrement_eventually() assert qos.value == 0 qos.increment_eventually() assert qos.value == 0 def test_consumer_decrement_eventually(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.decrement_eventually() assert qos.value == 9 qos.value = 0 qos.decrement_eventually() assert qos.value == 0 def test_set(self): mconsumer = Mock() qos = QoS(mconsumer.qos, 10) qos.set(12) assert qos.prev == 12 qos.set(qos.prev) kombu-4.1.0/t/unit/test_entity.py0000644000175000017500000003040113130603207016717 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals import pickle import pytest from case import Mock, call from kombu import Connection, Exchange, Producer, Queue, binding from kombu.abstract import MaybeChannelBound from kombu.exceptions import NotBoundError from kombu.serialization import registry from t.mocks import Transport def 
get_conn(): return Connection(transport=Transport) class test_binding: def test_constructor(self): x = binding( Exchange('foo'), 'rkey', arguments={'barg': 'bval'}, unbind_arguments={'uarg': 'uval'}, ) assert x.exchange == Exchange('foo') assert x.routing_key == 'rkey' assert x.arguments == {'barg': 'bval'} assert x.unbind_arguments == {'uarg': 'uval'} def test_declare(self): chan = get_conn().channel() x = binding(Exchange('foo'), 'rkey') x.declare(chan) assert 'exchange_declare' in chan def test_declare_no_exchange(self): chan = get_conn().channel() x = binding() x.declare(chan) assert 'exchange_declare' not in chan def test_bind(self): chan = get_conn().channel() x = binding(Exchange('foo')) x.bind(Exchange('bar')(chan)) assert 'exchange_bind' in chan def test_unbind(self): chan = get_conn().channel() x = binding(Exchange('foo')) x.unbind(Exchange('bar')(chan)) assert 'exchange_unbind' in chan def test_repr(self): b = binding(Exchange('foo'), 'rkey') assert 'foo' in repr(b) assert 'rkey' in repr(b) class test_Exchange: def test_bound(self): exchange = Exchange('foo', 'direct') assert not exchange.is_bound assert '= (2, 5): from hashlib import sha256 as _digest else: from sha import new as _digest # noqa def _nobuf(x): return [str(i) if isinstance(i, buffer_t) else i for i in x] def consumeN(conn, consumer, n=1, timeout=30): messages = [] def callback(message_data, message): messages.append(message_data) message.ack() prev, consumer.callbacks = consumer.callbacks, [callback] consumer.consume() seconds = 0 while True: try: conn.drain_events(timeout=1) except socket.timeout: seconds += 1 msg = 'Received %s/%s messages. %s seconds passed.' % ( len(messages), n, seconds) if seconds >= timeout: raise socket.timeout(msg) if seconds > 1: print(msg) if len(messages) >= n: break consumer.cancel() consumer.callback = prev return messages class TransportCase(unittest.TestCase): transport = None prefix = None sep = '.' 
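    # (editor's note) `prefix` and `sep` are joined by the P() helper
    # defined further down, namespacing the per-transport queue/exchange
    # names, e.g. self.P('b1') -> '<prefix>.b1'.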
userid = None password = None event_loop_max = 100 connection_options = {} suppress_disorder_warning = False reliable_purge = True connected = False skip_test_reason = None message_size_limit = None def before_connect(self): pass def after_connect(self, connection): pass def setUp(self): if self.transport: try: self.before_connect() except SkipTest as exc: self.skip_test_reason = str(exc) else: self.do_connect() self.exchange = Exchange(self.prefix, 'direct') self.queue = Queue(self.prefix, self.exchange, self.prefix) def purge(self, names): chan = self.connection.channel() total = 0 for queue in names: while 1: # ensure the queue is completly empty purged = chan.queue_purge(queue=queue) if not purged: break total += purged chan.close() return total def get_connection(self, **options): if self.userid: options.setdefault('userid', self.userid) if self.password: options.setdefault('password', self.password) return Connection(transport=self.transport, **options) def do_connect(self): self.connection = self.get_connection(**self.connection_options) try: self.connection.connect() self.after_connect(self.connection) except self.connection.connection_errors: self.skip_test_reason = '{0} transport cannot connect'.format( self.transport, ) else: self.connected = True def verify_alive(self): if self.transport: if not self.connected: raise SkipTest(self.skip_test_reason) return True def purge_consumer(self, consumer): return self.purge([queue.name for queue in consumer.queues]) def test_produce__consume(self): if not self.verify_alive(): return chan1 = self.connection.channel() consumer = chan1.Consumer(self.queue) self.purge_consumer(consumer) producer = chan1.Producer(self.exchange) producer.publish({'foo': 'bar'}, routing_key=self.prefix) message = consumeN(self.connection, consumer) self.assertDictEqual(message[0], {'foo': 'bar'}) chan1.close() self.purge([self.queue.name]) def test_purge(self): if not self.verify_alive(): return chan1 = self.connection.channel() consumer = chan1.Consumer(self.queue) self.purge_consumer(consumer) producer = chan1.Producer(self.exchange) for i in range(10): producer.publish({'foo': 'bar'}, routing_key=self.prefix) if self.reliable_purge: self.assertEqual(consumer.purge(), 10) self.assertEqual(consumer.purge(), 0) else: purged = 0 while purged < 9: purged += self.purge_consumer(consumer) def _digest(self, data): return _digest(str_to_bytes(data)).hexdigest() def test_produce__consume_large_messages( self, bytes=1048576, n=10, charset=string.punctuation + string.ascii_letters + string.digits): if not self.verify_alive(): return bytes = min(x for x in [bytes, self.message_size_limit] if x) messages = [''.join(random.choice(charset) for j in range(bytes)) + '--%s' % n for i in range(n)] digests = [] chan1 = self.connection.channel() consumer = chan1.Consumer(self.queue) self.purge_consumer(consumer) producer = chan1.Producer(self.exchange) for i, message in enumerate(messages): producer.publish({'text': message, 'i': i}, routing_key=self.prefix) digests.append(self._digest(message)) received = [(msg['i'], msg['text']) for msg in consumeN(self.connection, consumer, n)] self.assertEqual(len(received), n) ordering = [i for i, _ in received] if ordering != list(range(n)) and not self.suppress_disorder_warning: warnings.warn( '%s did not deliver messages in FIFO order: %r' % ( self.transport, ordering)) for i, text in received: if text != messages[i]: raise AssertionError('%i: %r is not %r' % ( i, text[-100:], messages[i][-100:])) self.assertEqual(self._digest(text), 
digests[i]) chan1.close() self.purge([self.queue.name]) def P(self, rest): return '%s%s%s' % (self.prefix, self.sep, rest) def test_produce__consume_multiple(self): if not self.verify_alive(): return chan1 = self.connection.channel() producer = chan1.Producer(self.exchange) b1 = Queue(self.P('b1'), self.exchange, 'b1')(chan1) b2 = Queue(self.P('b2'), self.exchange, 'b2')(chan1) b3 = Queue(self.P('b3'), self.exchange, 'b3')(chan1) [q.declare() for q in (b1, b2, b3)] self.purge([b1.name, b2.name, b3.name]) producer.publish('b1', routing_key='b1') producer.publish('b2', routing_key='b2') producer.publish('b3', routing_key='b3') chan1.close() chan2 = self.connection.channel() consumer = chan2.Consumer([b1, b2, b3]) messages = consumeN(self.connection, consumer, 3) self.assertItemsEqual(_nobuf(messages), ['b1', 'b2', 'b3']) chan2.close() self.purge([self.P('b1'), self.P('b2'), self.P('b3')]) def test_timeout(self): if not self.verify_alive(): return chan = self.connection.channel() self.purge([self.queue.name]) consumer = chan.Consumer(self.queue) self.assertRaises( socket.timeout, self.connection.drain_events, timeout=0.3, ) consumer.cancel() chan.close() def test_basic_get(self): if not self.verify_alive(): return chan1 = self.connection.channel() producer = chan1.Producer(self.exchange) chan2 = self.connection.channel() queue = Queue(self.P('basic_get'), self.exchange, 'basic_get') queue = queue(chan2) queue.declare() producer.publish({'basic.get': 'this'}, routing_key='basic_get') chan1.close() for i in range(self.event_loop_max): m = queue.get() if m: break time.sleep(0.1) self.assertEqual(m.payload, {'basic.get': 'this'}) self.purge([queue.name]) chan2.close() def test_cyclic_reference_transport(self): if not self.verify_alive(): return def _createref(): conn = self.get_connection() conn.transport conn.close() return weakref.ref(conn) self.assertIsNone(_createref()()) def test_cyclic_reference_connection(self): if not self.verify_alive(): return def _createref(): conn = self.get_connection() conn.connect() conn.close() return weakref.ref(conn) self.assertIsNone(_createref()()) def test_cyclic_reference_channel(self): if not self.verify_alive(): return def _createref(): conn = self.get_connection() conn.connect() chanrefs = [] try: for i in range(100): channel = conn.channel() chanrefs.append(weakref.ref(channel)) channel.close() finally: conn.close() return chanrefs for chanref in _createref(): self.assertIsNone(chanref()) def tearDown(self): if self.transport and self.connected: self.connection.close() kombu-4.1.0/requirements/0000755000175000017500000000000013134154263015304 5ustar omeromer00000000000000kombu-4.1.0/requirements/default.txt0000644000175000017500000000002113130603207017453 0ustar omeromer00000000000000amqp>=2.1.4,<3.0 kombu-4.1.0/requirements/test-ci.txt0000644000175000017500000000012213130603207017401 0ustar omeromer00000000000000pytest-cov codecov redis PyYAML msgpack-python>0.2.0 -r extras/sqs.txt sqlalchemy kombu-4.1.0/requirements/extras/0000755000175000017500000000000013134154263016612 5ustar omeromer00000000000000kombu-4.1.0/requirements/extras/qpid.txt0000644000175000017500000000004313130603207020276 0ustar omeromer00000000000000qpid-python>=0.26 qpid-tools>=0.26 kombu-4.1.0/requirements/extras/sqlalchemy.txt0000644000175000017500000000001313130603207021500 0ustar omeromer00000000000000sqlalchemy kombu-4.1.0/requirements/extras/zookeeper.txt0000644000175000017500000000001513130603207021343 0ustar omeromer00000000000000kazoo>=1.3.1 
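# --- editor's note -------------------------------------------------------
# The requirements/extras/*.txt files above and below are what setup.py
# (later in this archive) turns into pip extras through its reqs()/extras()
# helpers, so e.g. `pip install "kombu[zookeeper]"` pulls in kazoo>=1.3.1.
# A minimal sketch of that parsing (an illustration, not part of the
# distribution), assuming one requirement per line with optional comments:
def read_requirements(path):
    """Return the non-comment, non-blank requirement lines of *path*."""
    with open(path) as fh:
        return [line.split('#', 1)[0].strip()
                for line in fh
                if line.split('#', 1)[0].strip()]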
kombu-4.1.0/requirements/extras/msgpack.txt0000644000175000017500000000002613130603207020767 0ustar omeromer00000000000000msgpack-python>=0.4.7 kombu-4.1.0/requirements/extras/etcd.txt0000644000175000017500000000002313130603207020256 0ustar omeromer00000000000000python-etcd>=0.4.3 kombu-4.1.0/requirements/extras/redis.txt0000644000175000017500000000001513130603207020446 0ustar omeromer00000000000000redis>=2.8.0 kombu-4.1.0/requirements/extras/librabbitmq.txt0000644000175000017500000000002313130603207021627 0ustar omeromer00000000000000librabbitmq>=1.5.2 kombu-4.1.0/requirements/extras/mongodb.txt0000644000175000017500000000002413130603207020765 0ustar omeromer00000000000000pymongo>=2.6.2,<3.0 kombu-4.1.0/requirements/extras/slmq.txt0000644000175000017500000000003313130603207020314 0ustar omeromer00000000000000softlayer_messaging>=1.0.3 kombu-4.1.0/requirements/extras/couchdb.txt0000644000175000017500000000001213130603207020744 0ustar omeromer00000000000000pycouchdb kombu-4.1.0/requirements/extras/consul.txt0000644000175000017500000000002513130603207020644 0ustar omeromer00000000000000python-consul>=0.6.0 kombu-4.1.0/requirements/extras/sqs.txt0000644000175000017500000000002413130603207020146 0ustar omeromer00000000000000boto3>=1.4.4 pycurl kombu-4.1.0/requirements/extras/pyro.txt0000644000175000017500000000000613130603207020331 0ustar omeromer00000000000000pyro4 kombu-4.1.0/requirements/extras/yaml.txt0000644000175000017500000000001513130603207020302 0ustar omeromer00000000000000PyYAML>=3.10 kombu-4.1.0/requirements/pkgutils.txt0000644000175000017500000000016713130603207017704 0ustar omeromer00000000000000setuptools>=20.6.7 wheel>=0.29.0 flake8>=2.5.4 flakeplus>=1.1 tox>=2.3.1 sphinx2rst>=1.0 bumpversion pydocstyle==1.1.1 kombu-4.1.0/requirements/funtest.txt0000644000175000017500000000023613130603207017527 0ustar omeromer00000000000000# redis transport redis # MongoDB transport pymongo # Zookeeper transport kazoo # SQS transport boto3 # Qpid transport qpid-python>=0.26 qpid-tools>=0.26 kombu-4.1.0/requirements/test.txt0000644000175000017500000000003413130603207017012 0ustar omeromer00000000000000pytz>dev case>=1.5.2 pytest kombu-4.1.0/requirements/docs.txt0000644000175000017500000000006513130603207016767 0ustar omeromer00000000000000sphinx_celery>=1.1 librabbitmq -r extras/mongodb.txt kombu-4.1.0/requirements/test-ci-py2.txt0000644000175000017500000000002213130603207020110 0ustar omeromer00000000000000-r extras/sqs.txt kombu-4.1.0/requirements/dev.txt0000644000175000017500000000013713130603207016615 0ustar omeromer00000000000000https://github.com/celery/py-amqp/zipball/master https://github.com/celery/vine/zipball/master kombu-4.1.0/setup.py0000644000175000017500000001151413134153516014275 0ustar omeromer00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- import os import re import sys import codecs import setuptools import setuptools.command.test from distutils.command.install import INSTALL_SCHEMES if sys.version_info < (2, 7): raise Exception('Kombu 4.0 requires Python 2.7 or higher.') try: from setuptools import setup except ImportError: from distutils.core import setup # noqa # -- Parse meta re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') re_doc = re.compile(r'^"""(.+?)"""') def add_default(m): attr_name, attr_value = m.groups() return ((attr_name, attr_value.strip("\"'")),) def add_doc(m): return (('doc', m.groups()[0]),) pats = {re_meta: add_default, re_doc: add_doc} here = os.path.abspath(os.path.dirname(__file__)) meta_fh = open(os.path.join(here, 'kombu/__init__.py')) try: 
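    # (editor's note) The loop below scans kombu/__init__.py line by line,
    # collecting metadata assignments matched by re_meta -- lines such as
    # __version__ = '4.1.0' or __author__ = 'Ask Solem' (illustrative
    # examples) -- and the module docstring matched by re_doc, stopping at
    # the '# -eof meta-' sentinel; the resulting `meta` dict supplies the
    # name/version/author/description arguments to setup() further down.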
meta = {} for line in meta_fh: if line.strip() == '# -eof meta-': break for pattern, handler in pats.items(): m = pattern.match(line.strip()) if m: meta.update(handler(m)) finally: meta_fh.close() # -- data_files = [] root_dir = os.path.dirname(__file__) if root_dir != '': os.chdir(root_dir) src_dir = 'kombu' def fullsplit(path, result=None): if result is None: result = [] head, tail = os.path.split(path) if head == '': return [tail] + result if head == path: return result return fullsplit(head, [tail] + result) for scheme in list(INSTALL_SCHEMES.values()): scheme['data'] = scheme['purelib'] for dirpath, dirnames, filenames in os.walk(src_dir): # Ignore dirnames that start with '.' for i, dirname in enumerate(dirnames): if dirname.startswith('.'): del dirnames[i] for filename in filenames: if not filename.endswith('.py'): data_files.append( [dirpath, [os.path.join(dirpath, f) for f in filenames]], ) if os.path.exists('README.rst'): long_description = codecs.open('README.rst', 'r', 'utf-8').read() else: long_description = 'See https://pypi.python.org/pypi/kombu' # -*- Installation Requires -*- py_version = sys.version_info is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') def strip_comments(l): return l.split('#', 1)[0].strip() def reqs(*f): return [ r for r in ( strip_comments(l) for l in open( os.path.join(os.getcwd(), 'requirements', *f)).readlines() ) if r] def extras(*p): return reqs('extras', *p) class pytest(setuptools.command.test.test): user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')] def initialize_options(self): setuptools.command.test.test.initialize_options(self) self.pytest_args = [] def run_tests(self): import pytest sys.exit(pytest.main(self.pytest_args)) setup( name='kombu', packages=setuptools.find_packages(exclude=['t', 't.*']), version=meta['version'], description=meta['doc'], long_description=long_description, keywords='messaging message amqp rabbitmq redis actor producer consumer', author=meta['author'], author_email=meta['contact'], url=meta['homepage'], platforms=['any'], data_files=data_files, zip_safe=False, license='BSD', cmdclass={'test': pytest}, install_requires=reqs('default.txt'), tests_require=reqs('test.txt'), extras_require={ 'msgpack': extras('msgpack.txt'), 'yaml': extras('yaml.txt'), 'redis': extras('redis.txt'), 'mongodb': extras('mongodb.txt'), 'sqs': extras('sqs.txt'), 'zookeeper': extras('zookeeper.txt'), 'sqlalchemy': extras('sqlalchemy.txt'), 'librabbitmq': extras('librabbitmq.txt'), 'pyro': extras('pyro.txt'), 'slmq': extras('slmq.txt'), 'qpid': extras('qpid.txt'), 'consul': extras('consul.txt'), }, classifiers=[ 'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Programming Language :: Python :: Implementation :: Jython', 'Intended Audience :: Developers', 'Topic :: Communications', 'Topic :: System :: Distributed Computing', 'Topic :: System :: Networking', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) kombu-4.1.0/setup.cfg0000644000175000017500000000050013134154263014375 0ustar 
omeromer00000000000000[tool:pytest]
testpaths = t/unit/
python_classes = test_*

[build_sphinx]
source-dir = docs/
build-dir = docs/_build
all_files = 1

[flake8]
ignore = N806, N802, N801, N803

[pep257]
ignore = D102,D104,D203,D105,D213

[bdist_rpm]
requires = amqp >= 2.

[bdist_wheel]
universal = 1

[egg_info]
tag_build = 
tag_date = 0

kombu-4.1.0/PKG-INFO0000644000175000017500000004033313134154263013661 0ustar omeromer00000000000000Metadata-Version: 1.1
Name: kombu
Version: 4.1.0
Summary: Messaging library for Python.
Home-page: https://kombu.readthedocs.io
Author: Ask Solem
Author-email: ask@celeryproject.org
License: BSD
Description: ========================================
         kombu - Messaging library for Python
        ========================================

        |build-status| |coverage| |license| |wheel| |pyversion| |pyimp|

        :Version: 4.1.0
        :Web: http://kombu.me/
        :Download: http://pypi.python.org/pypi/kombu/
        :Source: https://github.com/celery/kombu/
        :Keywords: messaging, amqp, rabbitmq, redis, mongodb, python, queue

        About
        =====

        `Kombu` is a messaging library for Python.

        The aim of `Kombu` is to make messaging in Python as easy as possible by
        providing an idiomatic high-level interface for the AMQ protocol, and
        also provide proven and tested solutions to common messaging problems.

        `AMQP`_ is the Advanced Message Queuing Protocol, an open standard
        protocol for message orientation, queuing, routing, reliability and
        security, for which the `RabbitMQ`_ messaging server is the most
        popular implementation.

        Features
        ========

        * Allows application authors to support several message server
          solutions by using pluggable transports.

        * AMQP transport using the `py-amqp`_, `librabbitmq`_, or
          `qpid-python`_ libraries.

        * High performance AMQP transport written in C - when using `librabbitmq`_

          This is automatically enabled if librabbitmq is installed:

          ::

              $ pip install librabbitmq

        * Virtual transports make it really easy to add support for non-AMQP
          transports. There is already built-in support for `Redis`_,
          `Amazon SQS`_, `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_.

        * In-memory transport for unit testing.

        * Supports automatic encoding, serialization and compression of
          message payloads.

        * Consistent exception handling across transports.

        * The ability to ensure that an operation is performed by gracefully
          handling connection and channel errors.

        * Several annoyances with `amqplib`_ have been fixed, like supporting
          timeouts and the ability to wait for events on more than one channel.

        * Projects already using `carrot`_ can easily be ported by using
          a compatibility layer.

        For an introduction to AMQP you should read the article `Rabbits and
        warrens`_, and the `Wikipedia article about AMQP`_.

        .. _`RabbitMQ`: https://www.rabbitmq.com/
        .. _`AMQP`: https://amqp.org
        .. _`py-amqp`: https://pypi.python.org/pypi/amqp/
        .. _`qpid-python`: https://pypi.python.org/pypi/qpid-python/
        .. _`Redis`: https://redis.io
        .. _`Amazon SQS`: https://aws.amazon.com/sqs/
        .. _`Zookeeper`: https://zookeeper.apache.org/
        .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/
        .. _`amqplib`: https://barryp.org/software/py-amqplib/
        .. _`Wikipedia article about AMQP`: https://en.wikipedia.org/wiki/AMQP
        .. _`carrot`: https://pypi.python.org/pypi/carrot/
        .. _`librabbitmq`: https://pypi.python.org/pypi/librabbitmq
        .. _`Pyro`: https://pythonhosting.org/Pyro4
        .. _`SoftLayer MQ`: https://sldn.softlayer.com/reference/messagequeueapi
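        The "ensure" bullet above can be made concrete with a small sketch
        (an editor's illustration, not part of the original description;
        it assumes a reachable broker at the given URL):

        .. code:: python

            from kombu import Connection, Producer

            with Connection('amqp://guest:guest@localhost//') as conn:
                producer = Producer(conn.default_channel)

                def errback(exc, interval):
                    print('publish error: %r; retrying in %ss' % (exc, interval))

                # Wrap publish so connection/channel errors are retried.
                safe_publish = conn.ensure(producer, producer.publish,
                                           errback=errback, max_retries=3)
                safe_publish({'hello': 'world'}, routing_key='hello')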
        .. _transport-comparison:

        Transport Comparison
        ====================

        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    | **Priority** | **TTL**               |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *amqp*        | Native   | Yes        | Yes        | Yes           | Yes [#f3]_   | Yes [#f4]_            |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *qpid*        | Native   | Yes        | Yes        | Yes           | No           | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) | Yes          | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *mongodb*     | Virtual  | Yes        | Yes        | Yes           | Yes          | Yes                   |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    | No           | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            | Yes          | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            | No           | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+
        | *SLMQ*        | Virtual  | Yes        | Yes [#f1]_ | No            | No           | No                    |
        +---------------+----------+------------+------------+---------------+--------------+-----------------------+

        .. [#f1] Declarations only kept in memory, so exchanges/queues
                 must be declared by all clients that need them.

        .. [#f2] Fanout supported via storing routing tables in SimpleDB.
                 Disabled by default, but can be enabled by using the
                 ``supports_fanout`` transport option.

        .. [#f3] AMQP Message priority support depends on broker implementation.

        .. [#f4] AMQP Message/Queue TTL support depends on broker implementation.

        Documentation
        -------------

        Kombu is using Sphinx, and the latest documentation can be found here:

            https://kombu.readthedocs.io/

        Quick overview
        --------------

        .. code:: python

            from kombu import Connection, Exchange, Queue

            media_exchange = Exchange('media', 'direct', durable=True)
            video_queue = Queue('video', exchange=media_exchange, routing_key='video')

            def process_media(body, message):
                print(body)
                message.ack()

            # connections
            with Connection('amqp://guest:guest@localhost//') as conn:

                # produce
                producer = conn.Producer(serializer='json')
                producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013},
                                 exchange=media_exchange, routing_key='video',
                                 declare=[video_queue])

                # the declare above, makes sure the video queue is declared
                # so that the messages can be delivered.
                # It's a best practice in Kombu to have both publishers and
                # consumers declare the queue. You can also declare the
                # queue manually using:
                #     video_queue(conn).declare()
                # consume
                with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
                    # Process messages and handle events on all channels
                    while True:
                        conn.drain_events()

            # Consume from several queues on the same channel:
            video_queue = Queue('video', exchange=media_exchange, routing_key='video')
            image_queue = Queue('image', exchange=media_exchange, routing_key='image')

            with connection.Consumer([video_queue, image_queue],
                                     callbacks=[process_media]) as consumer:
                while True:
                    connection.drain_events()

        Or handle channels manually:

        .. code:: python

            with connection.channel() as channel:
                producer = Producer(channel, ...)
                consumer = Consumer(channel)

        All objects can be used outside of with statements too, just
        remember to close the objects after use:

        .. code:: python

            from kombu import Connection, Consumer, Producer

            connection = Connection()
            # ...
            connection.release()

            consumer = Consumer(channel_or_connection, ...)
            consumer.register_callback(my_callback)
            consumer.consume()
            # ....
            consumer.cancel()

        `Exchange` and `Queue` are simply declarations that can be pickled
        and used in configuration files etc.

        They also support operations, but to do so they need to be bound
        to a channel.

        Binding exchanges and queues to a connection will make it use
        that connection's default channel.

        ::

            >>> exchange = Exchange('tasks', 'direct')

            >>> connection = Connection()
            >>> bound_exchange = exchange(connection)
            >>> bound_exchange.delete()

            # the original exchange is not affected, and stays unbound.
            >>> exchange.delete()
            raise NotBoundError: Can't call delete on Exchange not bound to
                a channel.

        Terminology
        ===========

        There are some concepts you should be familiar with before starting:

        * Producers

          Producers send messages to an exchange.

        * Exchanges

          Messages are sent to exchanges. Exchanges are named and can be
          configured to use one of several routing algorithms. The exchange
          routes the messages to consumers by matching the routing key in
          the message with the routing key the consumer provides when
          binding to the exchange.

        * Consumers

          Consumers declare a queue, bind it to an exchange and receive
          messages from it.

        * Queues

          Queues receive messages sent to exchanges. The queues are declared
          by consumers.

        * Routing keys

          Every message has a routing key. The interpretation of the routing
          key depends on the exchange type. There are four default exchange
          types defined by the AMQP standard, and vendors can define custom
          types (so see your vendor's manual for details).

          These are the default exchange types defined by AMQP/0.8:

          * Direct exchange

            Matches if the routing key property of the message and
            the `routing_key` attribute of the consumer are identical.

          * Fan-out exchange

            Always matches, even if the binding does not have a routing key.

          * Topic exchange

            Matches the routing key property of the message by a primitive
            pattern matching scheme. The message routing key then consists
            of words separated by dots (`"."`, like domain names), and two
            special characters are available; star (`"*"`) and hash (`"#"`).
            The star matches any word, and the hash matches zero or more
            words. For example `"*.stock.#"` matches the routing keys
            `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`.

        Installation
        ============

        You can install `Kombu` either via the Python Package Index (PyPI)
        or from source.
To install using `pip`,: :: $ pip install kombu To install using `easy_install`,: :: $ easy_install kombu If you have downloaded a source tarball you can install it by doing the following,: :: $ python setup.py build # python setup.py install # as root Getting Help ============ Mailing list ------------ Join the `carrot-users`_ mailing list. .. _`carrot-users`: https://groups.google.com/group/carrot-users/ Bug tracker =========== If you have any suggestions, bug reports or annoyances please report them to our issue tracker at https://github.com/celery/kombu/issues/ Contributing ============ Development of `Kombu` happens at Github: https://github.com/celery/kombu You are highly encouraged to participate in the development. If you don't like Github (for some reason) you're welcome to send regular patches. License ======= This software is licensed under the `New BSD License`. See the `LICENSE` file in the top distribution directory for the full license text. .. |build-status| image:: https://secure.travis-ci.org/celery/kombu.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/kombu .. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=master :target: https://codecov.io/github/celery/kombu?branch=master .. |license| image:: https://img.shields.io/pypi/l/kombu.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg :alt: Kombu can be installed via wheel :target: https://pypi.python.org/pypi/kombu/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/kombu/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg :alt: Support Python implementations. :target: https://pypi.python.org/pypi/kombu/ -- Keywords: messaging message amqp rabbitmq redis actor producer consumer Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Intended Audience :: Developers Classifier: Topic :: Communications Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: System :: Networking Classifier: Topic :: Software Development :: Libraries :: Python Modules kombu-4.1.0/THANKS0000644000175000017500000000172413130603207013471 0ustar omeromer00000000000000======== THANKS ======== From ``carrot`` THANKS file =========================== * Thanks to Barry Pederson for the py-amqplib library. * Thanks to Grégoire Cachet for bug reports. * Thanks to Martin Mahner for the Sphinx theme. * Thanks to jcater for bug reports. * Thanks to sebest for bug reports. * Thanks to greut for bug reports From ``django-kombu`` THANKS file ================================= * Thanks to Rajesh Dhawan and other authors of django-queue-service for the database model implementation. 
See http://code.google.com/p/django-queue-service/. From ``kombu-sqlalchemy`` THANKS file ===================================== * Thanks to Rajesh Dhawan and other authors of django-queue-service for the database model implementation. See http://code.google.com/p/django-queue-service/. * Thanks to haridsv for the draft SQLAlchemy port (which can still be found at http://github.com/haridsv/celery-alchemy-poc) kombu-4.1.0/LICENSE0000644000175000017500000000320013130603207013552 0ustar omeromer00000000000000Copyright (c) 2015-2016 Ask Solem & contributors. All rights reserved. Copyright (c) 2012-2014 GoPivotal Inc & contributors. All rights reserved. Copyright (c) 2009-2012, Ask Solem & contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Ask Solem nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Ask Solem OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. kombu-4.1.0/AUTHORS0000644000175000017500000001213213134153527013632 0ustar omeromer00000000000000========= AUTHORS ========= :order: sorted (`.,$!sort -uf`) Adam Gaca Adam Nelson Adam Wentz Alan Justino Alex Koshelev Alexandre Bourget Anastasis Andronidis Andrew Watts Andrey Antukh Andrii Kostenko Andy McCurdy Anthony Lukach Antoine Legrand Anton Gyllenberg Ask Solem Basil Mironenko Bobby Beever Brian Bernstein Brian Bouterse C Anthony Risinger Chris Erway Christophe Chauvet Christopher Duryee Christopher Grebs Clay Gerrard Corentin Ardeois Dan LaMotte Dan McGee Dane Guempel Davanum Srinivas David Clymer David Gelvin David Strauss David Ziegler Dhananjay Nene Dima Kurguzov Dmitry Malinovsky Dustin J. Mitchell Emmanuel Cazenave Ephemera Eric Reynolds Fabrice Rabaute Federico Ficarelli Felix Schwarz Felix Yan Fernando Jorge Mota Flavio [FlaPer87] Percoco Premoli Florian Munz Franck Cuny Germán M. Bravo Gregory Haskins Hank John haridsv Hong Minhee Ian Eure Ian Struble Ionel Maries Cristian iSlava James Saryerwinnie James Turk Jason Cater Jasper Bryant-Greene Jeff Balogh Jesper Thomschütz Jesse Dhillon John Shuping John Spray John Watson Jonathan Halcrow Joseph Crosland Joshua Harlow Juan Carlos Ferrer Kai Groner Keith Fitzgerald Kevin McCarthy Kevin McDonald Latitia M. 
Haskins Len Buckens Lorenzo Mancini Luyun Xie <2304310@qq.com> Mahendra M Marcin Lulek (ergo) Mark Lavin markow Matt Wise Maxime Rouyrre mdk Mher Movsisyan Michael Barrett Michael Nelson Nathan Van Gheem Nitzan Miron Noah Kantrowitz Ollie Walsh Pascal Hartig Patrick Schneider Paul McLanahan Petar Radosevic Peter Hoffmann Pierre Riteau Radek Czajka Rafael Duran Castaneda Rafal Malinowski Ralf Nyren Randy Barlow Raphael Michel Rob Ottaway Robert Kopaczewski Roger Hu Rumyana Neykova Rune Halvorsen Ryan Petrello Sam Stavinoha Sascha Peilicke Scott Lyons Sean Bleier Sean Creeley Seb Insua Sergey Azovskov Sergey Tikhonov Shane Caraveo Steeve Morin Stefan Eletzhofer Stephan Jaekel Stephen Day Stuart Axon Tareque Hossain Thomas Johansson Tobias Schottdorf Tomaž Muraus Tommie McAfee Travis Cline Travis Swicegood Victor Garcia Viet Hung Nguyen Vince Gonzalez Vincent Driessen Wido den Hollander Zach Smith Zhao Xiaohong kombu-4.1.0/kombu.egg-info/0000755000175000017500000000000013134154263015370 5ustar omeromer00000000000000kombu-4.1.0/kombu.egg-info/not-zip-safe0000644000175000017500000000000113134154262017615 0ustar omeromer00000000000000 kombu-4.1.0/kombu.egg-info/dependency_links.txt0000644000175000017500000000000113134154262021435 0ustar omeromer00000000000000 kombu-4.1.0/kombu.egg-info/PKG-INFO0000644000175000017500000004033313134154262016467 0ustar omeromer00000000000000Metadata-Version: 1.1 Name: kombu Version: 4.1.0 Summary: Messaging library for Python. Home-page: https://kombu.readthedocs.io Author: Ask Solem Author-email: ask@celeryproject.org License: BSD Description: ======================================== kombu - Messaging library for Python ======================================== |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| :Version: 4.1.0 :Web: http://kombu.me/ :Download: http://pypi.python.org/pypi/kombu/ :Source: https://github.com/celery/kombu/ :Keywords: messaging, amqp, rabbitmq, redis, mongodb, python, queue About ===== `Kombu` is a messaging library for Python. The aim of `Kombu` is to make messaging in Python as easy as possible by providing an idiomatic high-level interface for the AMQ protocol, and also provide proven and tested solutions to common messaging problems. `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol for message orientation, queuing, routing, reliability and security, for which the `RabbitMQ`_ messaging server is the most popular implementation. Features ======== * Allows application authors to support several message server solutions by using pluggable transports. * AMQP transport using the `py-amqp`_, `librabbitmq`_, or `qpid-python`_ libraries. * High performance AMQP transport written in C - when using `librabbitmq`_ This is automatically enabled if librabbitmq is installed: :: $ pip install librabbitmq * Virtual transports makes it really easy to add support for non-AMQP transports. There is already built-in support for `Redis`_, `Amazon SQS`_, `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. * In-memory transport for unit testing. * Supports automatic encoding, serialization and compression of message payloads. * Consistent exception handling across transports. * The ability to ensure that an operation is performed by gracefully handling connection and channel errors. * Several annoyances with `amqplib`_ has been fixed, like supporting timeouts and the ability to wait for events on more than one channel. * Projects already using `carrot`_ can easily be ported by using a compatibility layer. 
For an introduction to AMQP you should read the article `Rabbits and warrens`_, and the `Wikipedia article about AMQP`_. .. _`RabbitMQ`: https://www.rabbitmq.com/ .. _`AMQP`: https://amqp.org .. _`py-amqp`: https://pypi.python.org/pypi/amqp/ .. _`qpid-python`: https://pypi.python.org/pypi/qpid-python/ .. _`Redis`: https://redis.io .. _`Amazon SQS`: https://aws.amazon.com/sqs/ .. _`Zookeeper`: https://zookeeper.apache.org/ .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ .. _`amqplib`: https://barryp.org/software/py-amqplib/ .. _`Wikipedia article about AMQP`: https://en.wikipedia.org/wiki/AMQP .. _`carrot`: https://pypi.python.org/pypi/carrot/ .. _`librabbitmq`: https://pypi.python.org/pypi/librabbitmq .. _`Pyro`: https://pythonhosting.org/Pyro4 .. _`SoftLayer MQ`: https://sldn.softlayer.com/reference/messagequeueapi .. _transport-comparison: Transport Comparison ==================== +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | **Priority** | **TTL** | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *amqp* | Native | Yes | Yes | Yes | Yes [#f3]_ | Yes [#f4]_ | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *qpid* | Native | Yes | Yes | Yes | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *mongodb* | Virtual | Yes | Yes | Yes | Yes | Yes | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ .. [#f1] Declarations only kept in memory, so exchanges/queues must be declared by all clients that needs them. .. [#f2] Fanout supported via storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. [#f3] AMQP Message priority support depends on broker implementation. .. [#f4] AMQP Message/Queue TTL support depends on broker implementation. Documentation ------------- Kombu is using Sphinx, and the latest documentation can be found here: https://kombu.readthedocs.io/ Quick overview -------------- .. 
code:: python from kombu import Connection, Exchange, Queue media_exchange = Exchange('media', 'direct', durable=True) video_queue = Queue('video', exchange=media_exchange, routing_key='video') def process_media(body, message): print body message.ack() # connections with Connection('amqp://guest:guest@localhost//') as conn: # produce producer = conn.Producer(serializer='json') producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, exchange=media_exchange, routing_key='video', declare=[video_queue]) # the declare above, makes sure the video queue is declared # so that the messages can be delivered. # It's a best practice in Kombu to have both publishers and # consumers declare the queue. You can also declare the # queue manually using: # video_queue(conn).declare() # consume with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: # Process messages and handle events on all channels while True: conn.drain_events() # Consume from several queues on the same channel: video_queue = Queue('video', exchange=media_exchange, key='video') image_queue = Queue('image', exchange=media_exchange, key='image') with connection.Consumer([video_queue, image_queue], callbacks=[process_media]) as consumer: while True: connection.drain_events() Or handle channels manually: .. code:: python with connection.channel() as channel: producer = Producer(channel, ...) consumer = Producer(channel) All objects can be used outside of with statements too, just remember to close the objects after use: .. code:: python from kombu import Connection, Consumer, Producer connection = Connection() # ... connection.release() consumer = Consumer(channel_or_connection, ...) consumer.register_callback(my_callback) consumer.consume() # .... consumer.cancel() `Exchange` and `Queue` are simply declarations that can be pickled and used in configuration files etc. They also support operations, but to do so they need to be bound to a channel. Binding exchanges and queues to a connection will make it use that connections default channel. :: >>> exchange = Exchange('tasks', 'direct') >>> connection = Connection() >>> bound_exchange = exchange(connection) >>> bound_exchange.delete() # the original exchange is not affected, and stays unbound. >>> exchange.delete() raise NotBoundError: Can't call delete on Exchange not bound to a channel. Terminology =========== There are some concepts you should be familiar with before starting: * Producers Producers sends messages to an exchange. * Exchanges Messages are sent to exchanges. Exchanges are named and can be configured to use one of several routing algorithms. The exchange routes the messages to consumers by matching the routing key in the message with the routing key the consumer provides when binding to the exchange. * Consumers Consumers declares a queue, binds it to a exchange and receives messages from it. * Queues Queues receive messages sent to exchanges. The queues are declared by consumers. * Routing keys Every message has a routing key. The interpretation of the routing key depends on the exchange type. There are four default exchange types defined by the AMQP standard, and vendors can define custom types (so see your vendors manual for details). These are the default exchange types defined by AMQP/0.8: * Direct exchange Matches if the routing key property of the message and the `routing_key` attribute of the consumer are identical. * Fan-out exchange Always matches, even if the binding does not have a routing key. 
* Topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available; star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`. Installation ============ You can install `Kombu` either via the Python Package Index (PyPI) or from source. To install using `pip`,: :: $ pip install kombu To install using `easy_install`,: :: $ easy_install kombu If you have downloaded a source tarball you can install it by doing the following,: :: $ python setup.py build # python setup.py install # as root Getting Help ============ Mailing list ------------ Join the `carrot-users`_ mailing list. .. _`carrot-users`: https://groups.google.com/group/carrot-users/ Bug tracker =========== If you have any suggestions, bug reports or annoyances please report them to our issue tracker at https://github.com/celery/kombu/issues/ Contributing ============ Development of `Kombu` happens at Github: https://github.com/celery/kombu You are highly encouraged to participate in the development. If you don't like Github (for some reason) you're welcome to send regular patches. License ======= This software is licensed under the `New BSD License`. See the `LICENSE` file in the top distribution directory for the full license text. .. |build-status| image:: https://secure.travis-ci.org/celery/kombu.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/kombu .. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=master :target: https://codecov.io/github/celery/kombu?branch=master .. |license| image:: https://img.shields.io/pypi/l/kombu.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg :alt: Kombu can be installed via wheel :target: https://pypi.python.org/pypi/kombu/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/kombu/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg :alt: Support Python implementations. 
:target: https://pypi.python.org/pypi/kombu/ -- Keywords: messaging message amqp rabbitmq redis actor producer consumer Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Programming Language :: Python :: Implementation :: Jython Classifier: Intended Audience :: Developers Classifier: Topic :: Communications Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: System :: Networking Classifier: Topic :: Software Development :: Libraries :: Python Modules kombu-4.1.0/kombu.egg-info/top_level.txt0000644000175000017500000000000613134154262020115 0ustar omeromer00000000000000kombu kombu-4.1.0/kombu.egg-info/requires.txt0000644000175000017500000000054713134154262017775 0ustar omeromer00000000000000amqp<3.0,>=2.1.4 [consul] python-consul>=0.6.0 [librabbitmq] librabbitmq>=1.5.2 [mongodb] pymongo<3.0,>=2.6.2 [msgpack] msgpack-python>=0.4.7 [pyro] pyro4 [qpid] qpid-python>=0.26 qpid-tools>=0.26 [redis] redis>=2.8.0 [slmq] softlayer_messaging>=1.0.3 [sqlalchemy] sqlalchemy [sqs] boto3>=1.4.4 pycurl [yaml] PyYAML>=3.10 [zookeeper] kazoo>=1.3.1 kombu-4.1.0/kombu.egg-info/SOURCES.txt0000644000175000017500000002247513134154262017265 0ustar omeromer00000000000000AUTHORS Changelog FAQ INSTALL LICENSE MANIFEST.in README.rst THANKS TODO setup.cfg setup.py docs/Makefile docs/changelog.rst docs/conf.py docs/faq.rst docs/index.rst docs/introduction.rst docs/make.bat docs/_ext/.keep docs/_static/.keep docs/_templates/sidebardonations.html docs/images/favicon.ico docs/images/kombu.jpg docs/images/kombusmall.jpg docs/includes/installation.txt docs/includes/introduction.txt docs/includes/resources.txt docs/reference/index.rst docs/reference/kombu.abstract.rst docs/reference/kombu.async.aws.connection.rst docs/reference/kombu.async.aws.rst docs/reference/kombu.async.aws.sqs.connection.rst docs/reference/kombu.async.aws.sqs.message.rst docs/reference/kombu.async.aws.sqs.queue.rst docs/reference/kombu.async.aws.sqs.rst docs/reference/kombu.async.debug.rst docs/reference/kombu.async.http.base.rst docs/reference/kombu.async.http.curl.rst docs/reference/kombu.async.http.rst docs/reference/kombu.async.hub.rst docs/reference/kombu.async.rst docs/reference/kombu.async.semaphore.rst docs/reference/kombu.async.timer.rst docs/reference/kombu.clocks.rst docs/reference/kombu.common.rst docs/reference/kombu.compat.rst docs/reference/kombu.compression.rst docs/reference/kombu.connection.rst docs/reference/kombu.exceptions.rst docs/reference/kombu.five.rst docs/reference/kombu.log.rst docs/reference/kombu.message.rst docs/reference/kombu.mixins.rst docs/reference/kombu.pidbox.rst docs/reference/kombu.pools.rst docs/reference/kombu.resource.rst docs/reference/kombu.rst docs/reference/kombu.serialization.rst docs/reference/kombu.simple.rst docs/reference/kombu.transport.SLMQ.rst docs/reference/kombu.transport.SQS.rst docs/reference/kombu.transport.base.rst docs/reference/kombu.transport.consul.rst 
docs/reference/kombu.transport.etcd.rst docs/reference/kombu.transport.filesystem.rst docs/reference/kombu.transport.librabbitmq.rst docs/reference/kombu.transport.memory.rst docs/reference/kombu.transport.mongodb.rst docs/reference/kombu.transport.pyamqp.rst docs/reference/kombu.transport.pyro.rst docs/reference/kombu.transport.qpid.rst docs/reference/kombu.transport.redis.rst docs/reference/kombu.transport.rst docs/reference/kombu.transport.sqlalchemy.models.rst docs/reference/kombu.transport.sqlalchemy.rst docs/reference/kombu.transport.virtual.exchange.rst docs/reference/kombu.transport.virtual.rst docs/reference/kombu.transport.zookeeper.rst docs/reference/kombu.utils.amq_manager.rst docs/reference/kombu.utils.collections.rst docs/reference/kombu.utils.compat.rst docs/reference/kombu.utils.debug.rst docs/reference/kombu.utils.div.rst docs/reference/kombu.utils.encoding.rst docs/reference/kombu.utils.eventio.rst docs/reference/kombu.utils.functional.rst docs/reference/kombu.utils.imports.rst docs/reference/kombu.utils.json.rst docs/reference/kombu.utils.limits.rst docs/reference/kombu.utils.objects.rst docs/reference/kombu.utils.scheduling.rst docs/reference/kombu.utils.text.rst docs/reference/kombu.utils.time.rst docs/reference/kombu.utils.url.rst docs/reference/kombu.utils.uuid.rst docs/templates/readme.txt docs/userguide/connections.rst docs/userguide/consumers.rst docs/userguide/examples.rst docs/userguide/index.rst docs/userguide/introduction.rst docs/userguide/pools.rst docs/userguide/producers.rst docs/userguide/serialization.rst docs/userguide/simple.rst examples/complete_receive.py examples/complete_send.py examples/hello_consumer.py examples/hello_publisher.py examples/memory_transport.py examples/simple_eventlet_receive.py examples/simple_eventlet_send.py examples/simple_receive.py examples/simple_send.py examples/experimental/async_consume.py examples/rpc-tut6/rpc_client.py examples/rpc-tut6/rpc_server.py examples/simple_task_queue/__init__.py examples/simple_task_queue/client.py examples/simple_task_queue/queues.py examples/simple_task_queue/tasks.py examples/simple_task_queue/worker.py extra/appveyor/install.ps1 extra/appveyor/run_with_compiler.cmd extra/requirements/default.txt extra/requirements/dev.txt extra/requirements/docs.txt extra/requirements/funtest.txt extra/requirements/pkgutils.txt extra/requirements/test-ci-py2.txt extra/requirements/test-ci.txt extra/requirements/test.txt extra/requirements/extras/consul.txt extra/requirements/extras/couchdb.txt extra/requirements/extras/etcd.txt extra/requirements/extras/librabbitmq.txt extra/requirements/extras/mongodb.txt extra/requirements/extras/msgpack.txt extra/requirements/extras/pyro.txt extra/requirements/extras/qpid.txt extra/requirements/extras/redis.txt extra/requirements/extras/slmq.txt extra/requirements/extras/sqlalchemy.txt extra/requirements/extras/sqs.txt extra/requirements/extras/yaml.txt extra/requirements/extras/zookeeper.txt kombu/__init__.py kombu/abstract.py kombu/clocks.py kombu/common.py kombu/compat.py kombu/compression.py kombu/connection.py kombu/entity.py kombu/exceptions.py kombu/five.py kombu/log.py kombu/message.py kombu/messaging.py kombu/mixins.py kombu/pidbox.py kombu/pools.py kombu/resource.py kombu/serialization.py kombu/simple.py kombu.egg-info/PKG-INFO kombu.egg-info/SOURCES.txt kombu.egg-info/dependency_links.txt kombu.egg-info/not-zip-safe kombu.egg-info/requires.txt kombu.egg-info/top_level.txt kombu/async/__init__.py kombu/async/debug.py kombu/async/hub.py 
kombu/async/semaphore.py kombu/async/timer.py kombu/async/aws/__init__.py kombu/async/aws/connection.py kombu/async/aws/ext.py kombu/async/aws/sqs/__init__.py kombu/async/aws/sqs/connection.py kombu/async/aws/sqs/ext.py kombu/async/aws/sqs/message.py kombu/async/aws/sqs/queue.py kombu/async/http/__init__.py kombu/async/http/base.py kombu/async/http/curl.py kombu/transport/SLMQ.py kombu/transport/SQS.py kombu/transport/__init__.py kombu/transport/base.py kombu/transport/consul.py kombu/transport/etcd.py kombu/transport/filesystem.py kombu/transport/librabbitmq.py kombu/transport/memory.py kombu/transport/mongodb.py kombu/transport/pyamqp.py kombu/transport/pyro.py kombu/transport/qpid.py kombu/transport/redis.py kombu/transport/zookeeper.py kombu/transport/sqlalchemy/__init__.py kombu/transport/sqlalchemy/models.py kombu/transport/virtual/__init__.py kombu/transport/virtual/base.py kombu/transport/virtual/exchange.py kombu/utils/__init__.py kombu/utils/amq_manager.py kombu/utils/collections.py kombu/utils/compat.py kombu/utils/debug.py kombu/utils/div.py kombu/utils/encoding.py kombu/utils/eventio.py kombu/utils/functional.py kombu/utils/imports.py kombu/utils/json.py kombu/utils/limits.py kombu/utils/objects.py kombu/utils/scheduling.py kombu/utils/text.py kombu/utils/time.py kombu/utils/url.py kombu/utils/uuid.py requirements/default.txt requirements/dev.txt requirements/docs.txt requirements/funtest.txt requirements/pkgutils.txt requirements/test-ci-py2.txt requirements/test-ci.txt requirements/test.txt requirements/extras/consul.txt requirements/extras/couchdb.txt requirements/extras/etcd.txt requirements/extras/librabbitmq.txt requirements/extras/mongodb.txt requirements/extras/msgpack.txt requirements/extras/pyro.txt requirements/extras/qpid.txt requirements/extras/redis.txt requirements/extras/slmq.txt requirements/extras/sqlalchemy.txt requirements/extras/sqs.txt requirements/extras/yaml.txt requirements/extras/zookeeper.txt t/__init__.py t/mocks.py t/integration/__init__.py t/integration/transport.py t/integration/tests/__init__.py t/integration/tests/test_SLMQ.py t/integration/tests/test_SQS.py t/integration/tests/test_amqp.py t/integration/tests/test_librabbitmq.py t/integration/tests/test_mongodb.py t/integration/tests/test_pyamqp.py t/integration/tests/test_qpid.py t/integration/tests/test_redis.py t/integration/tests/test_sqla.py t/integration/tests/test_zookeeper.py t/unit/__init__.py t/unit/conftest.py t/unit/test_clocks.py t/unit/test_common.py t/unit/test_compat.py t/unit/test_compression.py t/unit/test_connection.py t/unit/test_entity.py t/unit/test_exceptions.py t/unit/test_log.py t/unit/test_message.py t/unit/test_messaging.py t/unit/test_mixins.py t/unit/test_pidbox.py t/unit/test_pools.py t/unit/test_serialization.py t/unit/test_simple.py t/unit/async/__init__.py t/unit/async/test_hub.py t/unit/async/test_semaphore.py t/unit/async/test_timer.py t/unit/async/aws/__init__.py t/unit/async/aws/case.py t/unit/async/aws/test_aws.py t/unit/async/aws/test_connection.py t/unit/async/aws/sqs/__init__.py t/unit/async/aws/sqs/test_connection.py t/unit/async/aws/sqs/test_queue.py t/unit/async/http/__init__.py t/unit/async/http/test_curl.py t/unit/async/http/test_http.py t/unit/transport/__init__.py t/unit/transport/test_SQS.py t/unit/transport/test_base.py t/unit/transport/test_consul.py t/unit/transport/test_etcd.py t/unit/transport/test_filesystem.py t/unit/transport/test_librabbitmq.py t/unit/transport/test_memory.py t/unit/transport/test_mongodb.py 
t/unit/transport/test_pyamqp.py
t/unit/transport/test_qpid.py
t/unit/transport/test_redis.py
t/unit/transport/test_sqlalchemy.py
t/unit/transport/test_transport.py
t/unit/transport/test_zookeeper.py
t/unit/transport/virtual/__init__.py
t/unit/transport/virtual/test_base.py
t/unit/transport/virtual/test_exchange.py
t/unit/utils/__init__.py
t/unit/utils/test_amq_manager.py
t/unit/utils/test_compat.py
t/unit/utils/test_debug.py
t/unit/utils/test_div.py
t/unit/utils/test_encoding.py
t/unit/utils/test_functional.py
t/unit/utils/test_imports.py
t/unit/utils/test_json.py
t/unit/utils/test_objects.py
t/unit/utils/test_scheduling.py
t/unit/utils/test_time.py
t/unit/utils/test_url.py
t/unit/utils/test_utils.py
t/unit/utils/test_uuid.py
kombu-4.1.0/Changelog0000644000175000017500000032213113134153527014377 0ustar omeromer00000000000000.. _changelog:

================
 Change history
================

.. _version-4.1.0:

4.1.0
=====
:release-date: 2017-07-17 04:45 P.M MST
:release-by: Anthony Lukach

- SQS: Added support for long-polling on all supported queries. Fixed bug
  causing error on parsing responses with no retrieved messages from SQS.

  Contributed by **Anthony Lukach**.

- Async hub: Fixed potential infinite loop while performing todo tasks
  (Issue celery/celery#3712).

- Qpid: Fixed bug where messages could have duplicate ``delivery_tag``
  (Issue #563).

  Contributed by **bmbouter**.

- MongoDB: Fixed problem with using ``readPreference`` option at pymongo 3.x.

  Contributed by **Mikhail Elovskikh**.

- Re-added support for :pypi:`SQLAlchemy`.

  Contributed by **Amin Ghadersohi**.

- SQS: Fixed bug where hostname would default to ``localhost`` if not
  specified in settings.

  Contributed by **Anthony Lukach**.

- Redis: Added support for reading password from transport URL (Issue #677).

  Contributed by **George Psarakis**.

- RabbitMQ: Ensured safer encoding of queue arguments.

  Contributed by **Robert Kopaczewski**.

- Added fallback to :func:`uuid.uuid5` in :func:`generate_oid` if
  :func:`uuid.uuid3` fails.

  Contributed by **Bill Nottingham**.

- Fixed race condition and inaccurate timeouts for
  :class:`kombu.simple.SimpleBase` (Issue #720).

  Contributed by **c-nichols**.

- Zookeeper: Fixed last chroot character trimming.

  Contributed by **Dima Kurguzov**.

- RabbitMQ: Fixed bug causing an exception when attempting to close an
  already-closed connection (Issue #690).

  Contributed by **eavictor**.

- Removed deprecated use of StopIteration in generators and invalid regex
  escape sequence.

  Contributed by **Jon Dufresne**.

- Added Python 3.6 to CI testing.

  Contributed by **Jon Dufresne**.

- SQS: Allowed endpoint URL to be specified in the boto3 connection.

  Contributed by **georgepsarakis**.

- SQS: Added support for Python 3.4.

  Contributed by **Anthony Lukach**.

- SQS: ``kombu[sqs]`` now depends on :pypi:`boto3` (no longer using
  :pypi:`boto`).

  - Adds support for Python 3.4+
  - Adds support for FIFO queues (Issue #678) and (Issue celery/celery#3690)
  - Avoids issues around a broken endpoints file (Issue celery/celery#3672)

  Contributed by **Mischa Spiegelmock** and **Jerry Seutter**.

- Zookeeper: Added support for delaying task with Python 3.

  Contributed by **Dima Kurguzov**.

- SQS: Fixed bug where :meth:`kombu.transport.SQS.drain_events` did not
  support callback argument (Issue #694).

  Contributed by **Michael Montgomery**.

- Fixed bug around modifying dictionary size while iterating over it
  (Issue #675).

  Contributed by **Felix Yan**.

- etcd: Added handling for :exc:`EtcdException` exception rather than
  :exc:`EtcdError`.
  Contributed by **George Psarakis**.

- RabbitMQ: Ensured safer encoding of queue arguments.

  Contributed by **Robert Kopaczewski**.

- Added fallback to :func:`uuid.uuid5` in :func:`generate_oid` if
  :func:`uuid.uuid3` fails.

  Contributed by **Bill Nottingham**.

- Fixed race condition and inaccurate timeouts for
  :class:`kombu.simple.SimpleBase` (Issue #720).

  Contributed by **c-nichols**.

- Zookeeper: Fixed last chroot character trimming.

  Contributed by **Dima Kurguzov**.

- RabbitMQ: Fixed bug causing an exception when attempting to close an
  already-closed connection (Issue #690).

  Contributed by **eavictor**.

- Removed deprecated use of StopIteration in generators and invalid regex
  escape sequence.

  Contributed by **Jon Dufresne**.

- Added Python 3.6 to CI testing.

  Contributed by **Jon Dufresne**.

- SQS: Allowed endpoint URL to be specified in the boto3 connection.

  Contributed by **georgepsarakis**.

- SQS: Added support for Python 3.4.

  Contributed by **Anthony Lukach**.

- SQS: ``kombu[sqs]`` now depends on :pypi:`boto3` (no longer using
  :pypi:`boto`).

  - Adds support for Python 3.4+
  - Adds support for FIFO queues (Issue #678) and (Issue celery/celery#3690)
  - Avoids issues around a broken endpoints file (Issue celery/celery#3672)

  Contributed by **Mischa Spiegelmock** and **Jerry Seutter**.

- Zookeeper: Added support for delaying task with Python 3.

  Contributed by **Dima Kurguzov**.

- SQS: Fixed bug where :meth:`kombu.transport.SQS.drain_events` did not
  support callback argument (Issue #694).

  Contributed by **Michael Montgomery**.

- Fixed bug around modifying dictionary size while iterating over it
  (Issue #675).

  Contributed by **Felix Yan**.

- etcd: Added handling for :exc:`EtcdException` exception rather than
  :exc:`EtcdError`.

  Contributed by **Stephen Milner**.

- Documentation improvements by:

  - **Mads Jensen**
  - **Matias Insaurralde**
  - **Omer Katz**
  - **Dmitry Dygalo**
  - **Christopher Hoskin**

.. _version-4.0.2:

4.0.2
=====
:release-date: 2016-12-15 03:31 P.M PST
:release-by: Ask Solem

- Now depends on :mod:`amqp` 2.1.4

  This new version takes advantage of TCP Keepalive settings on Linux,
  making it better at detecting closed connections, also in failover
  conditions.

- Redis: Priority was reversed so, e.g. priority 0 became priority 9.

.. _version-4.0.1:

4.0.1
=====
:release-date: 2016-12-07 06:00 P.M PST
:release-by: Ask Solem

- Now depends on :mod:`amqp` 2.1.3

  This new version takes advantage of the new ``TCP_USER_TIMEOUT`` socket
  option on Linux.

- Producer: Fixed performance degradation when default exchange specified
  (Issue #651).

- QPid: Switched to using getattr in ``qpid.Transport.__del__``
  (Issue #658).

  Contributed by **Patrick Creech**.

- QPid: Now uses monotonic time for timeouts.

- MongoDB: Fixed compatibility with Python 3 (Issue #661).

- Consumer: ``__exit__`` now skips cancelling consumer if
  connection-related error raised (Issue #670).

- MongoDB: Removes use of natural sort (Issue #638).

  Contributed by **Anton Chaporgin**.

- Fixed wrong keyword argument ``channel`` error (Issue #652).

  Contributed by **Toomore Chiang**.

- Safe argument to ``urllib.quote`` must be bytes on Python 2.x
  (Issue #645).

- Documentation improvements by:

  - **Carlos Edo**
  - **Cemre Mengu**

.. _version-4.0:

4.0
===
:release-date: 2016-10-28 16:45 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 2.0.

  The new py-amqp version has been refactored for better performance,
  using modern Python socket conventions, and API consistency.

- No longer depends on :mod:`anyjson`.

  Kombu will now only choose between :pypi:`simplejson` and the built-in
  :mod:`json`.

  Using the latest version of simplejson is recommended:

  .. code-block:: console

      $ pip install -U simplejson

- Removed transports that are no longer supported in this version:

  - Django ORM transport
  - SQLAlchemy ORM transport
  - Beanstalk transport
  - ZeroMQ transport
  - amqplib transport (use pyamqp).

- API Changes

  * Signature of :class:`kombu.Message` now takes body as first argument.

    It used to be ``Message(channel, body=body, **kw)``, but now it's
    ``Message(body, channel=channel, **kw)``.

    This is unlikely to affect you, as the Kombu API does not have users
    instantiate messages manually.

- New SQS transport

  Donated by NextDoor, with additional contributions from mdk.

  .. note::

      ``kombu[sqs]`` now depends on :pypi:`pycurl`.

- New Consul transport.

  Contributed by **Wido den Hollander**.

- New etcd transport.

  Contributed by **Stephen Milner**.

- New Qpid transport.

  It was introduced as an experimental transport in Kombu 3.0, but is now
  mature enough to be fully supported.

  Created and maintained by **Brian Bouterse**.

- Redis: Priority 0 is now lowest, 9 is highest.
  (**backward incompatible**)

  This is to match how priorities work in AMQP.

  Fix contributed by **Alex Koshelev**.

- Redis: Support for Sentinel

  You can point the connection to a list of sentinel URLs like:

  .. code-block:: text

      sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/...

  where each sentinel is separated by a `;`. Multiple sentinels are
  handled by :class:`kombu.Connection` constructor, and placed in the
  alternative list of servers to connect to in case of connection failure.

  Contributed by **Sergey Azovskov**, and **Lorenzo Mancini**.
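  A sketch of what such a setup could look like (the sentinel hosts are
  placeholders, and the ``master_name`` transport option shown here is an
  assumption about how the master is selected, not part of this entry):

  .. code-block:: python

      from kombu import Connection

      conn = Connection(
          'sentinel://localhost:26379;sentinel://localhost:26380/',
          transport_options={'master_name': 'mymaster'},  # assumed option
      )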
- RabbitMQ Queue Extensions

  New arguments have been added to :class:`kombu.Queue` that lets
  you directly and conveniently configure the RabbitMQ queue extensions.

  - ``Queue(expires=20.0)``

    Set queue expiry time in float seconds.
    See :attr:`kombu.Queue.expires`.

  - ``Queue(message_ttl=30.0)``

    Set queue message time-to-live float seconds.
    See :attr:`kombu.Queue.message_ttl`.

  - ``Queue(max_length=1000)``

    Set queue max length (number of messages) as int.
    See :attr:`kombu.Queue.max_length`.

  - ``Queue(max_length_bytes=1000)``

    Set queue max length (message size total in bytes) as int.
    See :attr:`kombu.Queue.max_length_bytes`.

  - ``Queue(max_priority=10)``

    Declare queue to be a priority queue that routes messages based on
    the ``priority`` field of the message.
    See :attr:`kombu.Queue.max_priority`.

- RabbitMQ: ``Message.ack`` now supports the ``multiple`` argument.

  If multiple is set to True, then all messages received before the
  message being acked will also be acknowledged.

- ``amqps://`` can now be specified to require SSL (Issue #610).

- ``Consumer.cancel_by_queue`` is now constant time.

- ``Connection.ensure*`` now raises
  :exc:`kombu.exceptions.OperationalError`.

  Things that can be retried are now reraised as
  :exc:`kombu.exceptions.OperationalError`.

- Redis: Fixed SSL support.

  Contributed by **Robert Kolba**.

- New ``Queue.consumer_arguments`` can be used for the ability to set
  consumer priority via ``x-priority``.

  See https://www.rabbitmq.com/consumer-priority.html

  Example:

  .. code-block:: python

      Queue(
          'qname',
          exchange=Exchange('exchange'),
          routing_key='qname',
          consumer_arguments={'x-priority': 3},
      )

- Queue/Exchange: ``no_declare`` option added (also enabled for internal
  amq. exchanges) (Issue #565).

- JSON serializer now calls ``obj.__json__`` for unsupported types.

  This means you can now define a ``__json__`` method for custom types
  that can be reduced down to a built-in json type.

  Example:

  .. code-block:: python

      class Person:
          first_name = None
          last_name = None
          address = None

          def __json__(self):
              return {
                  'first_name': self.first_name,
                  'last_name': self.last_name,
                  'address': self.address,
              }

- JSON serializer now handles datetimes, Django promise, UUID and Decimal.

- Beanstalk: Priority 0 is now lowest, 9 is highest.
  (**backward incompatible**)

  This is to match how priorities work in AMQP.

  Fix contributed by **Alex Koshelev**.

- Redis: now supports SSL using the ``ssl`` argument to
  :class:`~kombu.Connection`.

- Redis: Fanout exchanges are no longer visible between vhosts, and
  fanout messages can be filtered by patterns.
  (**backward incompatible**)

  It was possible to enable this mode previously using the
  ``fanout_prefix``, and ``fanout_patterns`` transport options, but now
  these are enabled by default.

  If you want to mix and match producers/consumers running different
  versions you need to configure your kombu 3.x clients to also enable
  these options:

  .. code-block:: pycon

      >>> Connection(transport_options={
          'fanout_prefix': True,
          'fanout_patterns': True,
      })

- Pidbox: Mailbox new arguments: TTL and expiry.

  Mailbox now supports new arguments for controlling message TTLs and
  queue expiry, both for the mailbox queue and for reply queues.

  - ``queue_expires`` (float/int seconds).
  - ``queue_ttl`` (float/int seconds).
  - ``reply_queue_expires`` (float/int seconds).
  - ``reply_queue_ttl`` (float/int seconds).

  All take seconds in int/float.

  Contributed by **Alan Justino**.
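  A minimal sketch of how these arguments can be combined (the
  ``'worker'`` namespace and the chosen values are illustrative only):

  .. code-block:: python

      from kombu.pidbox import Mailbox

      mailbox = Mailbox(
          'worker',                  # hypothetical namespace
          type='fanout',
          queue_ttl=300.0,           # mailbox queue message TTL (seconds)
          queue_expires=300.0,       # mailbox queue expiry (seconds)
          reply_queue_ttl=300.0,     # reply queue message TTL (seconds)
          reply_queue_expires=10.0,  # reply queue expiry (seconds)
      )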
- Exchange.delivery_mode now defaults to :const:`None`, and the default
  is instead set by ``Producer.publish``.

- :class:`~kombu.Consumer` now supports a new ``prefetch_count`` argument,
  which if provided will force the consumer to set an initial prefetch
  count just before starting.

- Virtual transports now store ``priority`` as a property, not in
  ``delivery_info``, to be compatible with AMQP.

- ``reply_to`` argument to ``Producer.publish`` can now be
  :class:`~kombu.Queue` instance.

- Connection: There's now a new method
  ``Connection.supports_exchange_type(type)`` that can be used to check
  if the current transport supports a specific exchange type.

- SQS: Consumers can now read json messages not sent by Kombu.

  Contributed by **Juan Carlos Ferrer**.

- SQS: Will now log the access key used when authentication fails.

  Contributed by **Hank John**.

- Added new :class:`kombu.mixins.ConsumerProducerMixin` for consumers
  that will also publish messages on a separate connection.
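  As a rough illustration of the mixin (the class, queue and routing key
  names are invented for this example):

  .. code-block:: python

      from kombu import Queue
      from kombu.mixins import ConsumerProducerMixin

      class Worker(ConsumerProducerMixin):

          def __init__(self, connection):
              self.connection = connection

          def get_consumers(self, Consumer, channel):
              return [Consumer(
                  queues=[Queue('tasks')],       # hypothetical queue
                  on_message=self.handle_message,
              )]

          def handle_message(self, message):
              # self.producer publishes using a separate connection,
              # so publishing does not interfere with consuming.
              self.producer.publish(
                  {'handled': True},
                  routing_key='results',         # hypothetical key
                  retry=True,
              )
              message.ack()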
- Messages: Now have a more descriptive ``repr``.

  Contributed by **Joshua Harlow**.

- Async: HTTP client based on curl.

- Async: Now uses `poll` instead of `select` where available.

- MongoDB: Now supports priorities.

  Contributed by **Alex Koshelev**.

- Virtual transports now support multiple queue bindings.

  Contributed by **Federico Ficarelli**.

- Virtual transports now support the anon exchange.

  If, when publishing a message, the exchange argument is set to ``''``
  (empty string), the routing_key will be regarded as the destination
  queue. This will bypass the routing table completely, and just deliver
  the message to the queue name specified in the routing key.

- Zookeeper: Transport now uses the built-in support in kazoo to handle
  failover when using a list of server names.

  Contributed by **Joshua Harlow**.

- ConsumerMixin.run now passes keyword arguments to .consume.

Deprecations and removals
-------------------------

- The deprecated method ``Consumer.add_queue_from_dict`` has been removed.

  Use instead:

  .. code-block:: python

      consumer.add_queue(Queue.from_dict(queue_name, **options))

- The deprecated function ``kombu.serialization.encode`` has been removed.

  Use :func:`kombu.serialization.dumps` instead.

- The deprecated function ``kombu.serialization.decode`` has been removed.

  Use :func:`kombu.serialization.loads` instead.

- Removed module ``kombu.syn``

  ``detect_environment`` has been moved to kombu.utils.compat

.. _version-3.0.37:

3.0.37
======
:release-date: 2016-10-06 05:00 P.M PDT
:release-by: Ask Solem

- Connection: Return value of ``.info()`` was no longer JSON
  serializable, leading to "itertools.cycle object not JSON serializable"
  errors (Issue #635).

.. _version-3.0.36:

3.0.36
======
:release-date: 2016-09-30 03:06 P.M PDT
:release-by: Ask Solem

- Connection: Fixed bug when cloning connection with alternate urls.

  Fix contributed by Emmanuel Cazenave.

- Redis: Fixed problem with unix socket connections.

  https://github.com/celery/celery/issues/2903

  Fix contributed by Raphael Michel.

- Redis: Fixed compatibility with older redis-py versions (Issue #576).

- Broadcast now retains queue name when being copied/pickled (Issue #578).

.. _version-3.0.35:

3.0.35
======
:release-date: 2016-03-22 11:22 P.M PST
:release-by: Ask Solem

- msgpack: msgpack support now requires msgpack-python > 0.4.7.

- Redis: TimeoutError was no longer handled as a recoverable error.

- Redis: Adds the ability to set more Redis connection options using
  ``Connection(transport_options={...})``.

  - ``socket_connect_timeout``
  - ``socket_keepalive`` (requires :mod:`redis-py` > 2.10)
  - ``socket_keepalive_options`` (requires :mod:`redis-py` > 2.10)

- msgpack: Fixes support for binary/unicode data.

.. _version-3.0.34:

3.0.34
======
:release-date: 2016-03-03 05:30 P.M PST
:release-by: Ask Solem

- Qpid: Adds async error handling.

  Contributed by Brian Bouterse.

- Qpid: Delivery tag is now a UUID4 (Issue #563).

  Fix contributed by Brian Bouterse.

- Redis: ``Connection.as_uri()`` returned malformed URLs when the
  ``redis+socket`` scheme was used (Issue celery/celery#2995).

- msgpack: Use binary encoding instead of utf-8 (Issue #570).

.. _version-3.0.33:

3.0.33
======
:release-date: 2016-01-08 06:36 P.M PST
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.9.

- Redis: Fixed problem with auxiliary connections causing the main
  consumer connection to be closed (Issue #550).

- Qpid: No longer uses threads to operate, to ensure compatibility with
  all environments (Issue #531).

.. _version-3.0.32:

3.0.32
======
:release-date: 2015-12-16 02:29 P.M PST
:release-by: Ask Solem

- Redis: Fixed bug introduced in 3.0.31 where the redis transport always
  connects to localhost, regardless of host setting.

.. _version-3.0.31:

3.0.31
======
:release-date: 2015-12-16 12:00 P.M PST
:release-by: Ask Solem

- Redis: Fixed bug introduced in 3.0.30 where socket was prematurely
  disconnected.

- Hub: Removed debug logging message: "Deregistered fd..." (Issue #549).

.. _version-3.0.30:

3.0.30
======
:release-date: 2015-12-07 12:28 A.M PST
:release-by: Ask Solem

- Fixes compatibility with uuid in Python 2.7.11 and 3.5.1.

  Fix contributed by Kai Groner.

- Redis transport: Attempt at fixing problem with hanging consumer after
  disconnected from server.

- Event loop: Attempt at fixing issue with 100% CPU when using the Redis
  transport.

- Database transport: Fixed Oracle compatibility.

  An "ORA-00907: missing right parenthesis" error could manifest when
  using an Oracle database with the database transport.

  Fix contributed by Deepak N.

- Documentation fixes

  Contributed by Tommaso Barbugli.

.. _version-3.0.29:

3.0.29
======
:release-date: 2015-10-26 11:10 A.M PDT
:release-by: Ask Solem

- Fixed serialization issue for ``bindings.as_dict()`` (Issue #453).

  Fix contributed by Sergey Tikhonov.

- Json serializer wrongly treated bytes as ``ascii``, not ``utf-8``
  (Issue #532).

- MongoDB: Now supports pymongo 3.x.

  Contributed by Len Buckens.

- SQS: Tests passing on Python 3.

  Fix contributed by Felix Yan.

.. _version-3.0.28:

3.0.28
======
:release-date: 2015-10-12 12:00 PM PDT
:release-by: Ask Solem

.. admonition:: Django transport migrations.

    If you're using Django 1.8 and have already created the
    kombu_transport_django tables, you have to run a fake initial
    migration:

    .. code-block:: console

        $ python manage.py migrate kombu_transport_django --fake-initial

- No longer compatible with South by default.

  To keep using kombu.transport.django with South migrations you now need
  to configure a new location for the kombu migrations:

  .. code-block:: python

      SOUTH_MIGRATION_MODULES = {
          'kombu_transport_django':
              'kombu.transport.django.south_migrations',
      }

- Keep old South migrations in
  ``kombu.transport.django.south_migrations``.

- Now works with Redis < 2.10 again.

.. _version-3.0.27:

3.0.27
======
:release-date: 2015-10-09 3:10 PM PDT
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.7.

- Fixed libSystem import error on some macOS 10.11 (El Capitan)
  installations.

  Fix contributed by Eric Wang.

- Now compatible with Django 1.9.
- Django: Adds migrations for the database transport.

- Redis: Now depends on py-redis 2.10.0 or later (Issue #468).

- QPid: Can now connect as localhost (Issue #519).

  Fix contributed by Brian Bouterse.

- QPid: Adds support for ``login_method`` (Issue #502, Issue #499).

  Contributed by Brian Bouterse.

- QPid: Now reads SASL mechanism from broker string (Issue #498).

  Fix contributed by Brian Bouterse.

- QPid: Monitor thread now properly terminated on session close
  (Issue #485).

  Fix contributed by Brian Bouterse.

- QPid: Fixed file descriptor leak (Issue #476).

  Fix contributed by Jeff Ortel.

- Docs: Fixed wrong order for entrypoint arguments (Issue #473).

- ConsumerMixin: Connection error logs now include traceback (Issue #480).

- BaseTransport now raises RecoverableConnectionError when disconnected
  (Issue #507).

- Consumer: Adds ``tag_prefix`` option to modify how consumer tags are
  generated (Issue #509).

.. _version-3.0.26:

3.0.26
======
:release-date: 2015-04-22 06:00 P.M UTC
:release-by: Ask Solem

- Fixed compatibility with py-redis versions before 2.10.3 (Issue #470).

.. _version-3.0.25:

3.0.25
======
:release-date: 2015-04-21 02:00 P.M UTC
:release-by: Ask Solem

- pyamqp/librabbitmq now uses 5671 as default port when SSL is enabled
  (Issue #459).

- Redis: Now supports passwords in ``redis+socket://:pass@host:port``
  URLs (Issue #460).

- ``Producer.publish`` now defines the ``expiration`` property in support
  of the `RabbitMQ per-message TTL extension`_.

  Contributed by Anastasis Andronidis.

- Connection transport attribute now set correctly for all transports.

  Contributed by Alex Koshelev.

- qpid: Fixed bug where the connection was not being closed properly.

  Contributed by Brian Bouterse.

- :class:`~kombu.entity.bindings` is now JSON serializable (Issue #453).

  Contributed by Sergey Tikhonov.

- Fixed typo in error when yaml is not installed (said ``msgpack``).

  Contributed by Joshua Harlow.

- Redis: Now properly handles :exc:`redis.exceptions.TimeoutError` raised
  by :mod:`redis`.

  Contributed by markow.

- qpid: Adds additional string to check for when connecting to qpid.

  When we connect to qpid, we need to ensure that we skip to the next
  SASL mechanism if the current mechanism fails. Otherwise, we will keep
  retrying the connection with a non-working mech.

  Contributed by Chris Duryee.

- qpid: Handle ``NotFound`` exceptions.

  Contributed by Brian Bouterse.

- :class:`Queue.__repr__` now makes sure return value is not unicode
  (Issue #440).

- qpid: ``Queue.purge`` incorrectly raised :exc:`AttributeError` if the
  queue does not exist (Issue #439).

  Contributed by Brian Bouterse.

- Linux: Now ignores permission errors on epoll unregister.

.. _`RabbitMQ per-message TTL extension`: https://www.rabbitmq.com/ttl.html

.. _version-3.0.24:

3.0.24
======
:release-date: 2014-11-17 11:00 P.M UTC
:release-by: Ask Solem

- The Qpid broker is supported for Python 2.x environments. The Qpid
  transport includes full SSL support within Kombu.

  See the :mod:`kombu.transport.qpid` docs for more info.

  Contributed by Brian Bouterse and Chris Duryee through support from
  Red Hat.

- Dependencies: extra[librabbitmq] now requires librabbitmq 1.6.0

- Docstrings for :class:`~kombu.utils.limit.TokenBucket` did not match
  implementation.

  Fix contributed by Jesse Dhillon.

- :func:`~kombu.common.oid_from` accidentally called ``uuid.getnode()``
  but did not use the return value.

  Fix contributed by Alexander Todorov.

- Redis: Now ignores errors when closing the underlying connection.
- Redis: Restoring messages will now use a single connection.

- ``kombu.five.monotonic``: Can now be imported even if ctypes is not
  available for some reason (e.g. App Engine).

- Documentation: Improved example to use the ``declare`` argument to
  ``Producer`` (Issue #423).

- Django: Fixed ``app_label`` for older Django versions (``< 1.7``).
  (Issue #414).

.. _version-3.0.23:

3.0.23
======
:release-date: 2014-09-14 10:45 P.M UTC
:release-by: Ask Solem

- Django: Fixed bug in the Django 1.7 compatibility improvements related
  to autocommit handling.

  Contributed by Radek Czajka.

- Django: The Django transport models would not be created on syncdb
  after app label rename (Issue #406).

.. _version-3.0.22:

3.0.22
======
:release-date: 2014-09-04 03:00 P.M UTC
:release-by: Ask Solem

- kombu.async: Min. delay between waiting for timer was always increased
  to one second.

- Fixed bug in itermessages where message is received after the with
  statement exits the block.

  Fixed by Rumyana Neykova.

- Connection.autoretry: Now works with functions missing wrapped
  attributes (``__module__``, ``__name__``, ``__doc__``). Fixes #392.

  Contributed by johtso.

- Django: Now sets custom app label for ``kombu.transport.django`` to
  work with recent changes in Django 1.7.

- SimpleQueue removed messages from the wrong end of buffer (Issue #380).

- Tests: Now using ``unittest.mock`` if available (Issue #381).

.. _version-3.0.21:

3.0.21
======
:release-date: 2014-07-07 02:00 P.M UTC
:release-by: Ask Solem

- Fixed remaining bug in ``maybe_declare`` for ``auto_delete`` exchanges.

  Fix contributed by Roger Hu.

- MongoDB: Creating a channel now properly evaluates a connection
  (Issue #363).

  Fix contributed by Len Buckens.

.. _version-3.0.20:

3.0.20
======
:release-date: 2014-06-24 02:30 P.M UTC
:release-by: Ask Solem

- Reverts change in 3.0.17 where ``maybe_declare`` caches the declaration
  of auto_delete queues and exchanges.

  Fix contributed by Roger Hu.

- Redis: Fixed race condition when using gevent and the channel is
  closed.

  Fix contributed by Andrew Rodionoff.

.. _version-3.0.19:

3.0.19
======
:release-date: 2014-06-09 03:10 P.M UTC
:release-by: Ask Solem

- The wheel distribution did not support Python 2.6 by failing to list
  the extra dependencies required.

- Durable and auto_delete queues/exchanges can be cached using
  ``maybe_declare``.

.. _version-3.0.18:

3.0.18
======
:release-date: 2014-06-02 06:00 P.M UTC
:release-by: Ask Solem

- A typo introduced in 3.0.17 caused kombu.async.hub to crash
  (Issue #360).

.. _version-3.0.17:

3.0.17
======
:release-date: 2014-06-02 05:00 P.M UTC
:release-by: Ask Solem

- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.2.

- Async: Event loop now selectively removes file descriptors for the mode
  it failed in, and keeps others (e.g read vs write).

  Fix contributed by Roger Hu.

- CouchDB: Now works without userid set.

  Fix contributed by Latitia M. Haskins.

- SQLAlchemy: Now supports recovery from connection errors.

  Contributed by Felix Schwarz.

- Redis: Restore at shutdown now works when ack emulation is disabled.

- :func:`kombu.common.eventloop` accidentally swallowed socket errors.

- Adds :func:`kombu.utils.url.sanitize_url`

.. _version-3.0.16:

3.0.16
======
:release-date: 2014-05-06 01:00 P.M UTC
:release-by: Ask Solem

- ``kombu[librabbitmq]`` now depends on librabbitmq 1.5.1.

- Redis: Fixes ``TypeError`` problem in ``unregister`` (Issue #342).

  Fix contributed by Tobias Schottdorf.

- Tests: Some unit tests accidentally required the `redis-py` library.

  Fix contributed by Randy Barlow.
- librabbitmq: Would crash when using an older version of
  :mod:`librabbitmq`, now emits warning instead.

.. _version-3.0.15:

3.0.15
======
:release-date: 2014-04-15 09:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.5.

- RabbitMQ 3.3 changes QoS semantics (Issue #339).

  See the RabbitMQ release notes here:
  http://www.rabbitmq.com/blog/2014/04/02/breaking-things-with-rabbitmq-3-3/

  A new connection property has been added that can be used to detect
  whether the remote server is using this new QoS behavior:

  .. code-block:: pycon

      >>> Connection('amqp://').qos_behavior_matches_spec
      False

  so if your application depends on the old semantics you can use this
  to set the ``apply_global`` flag appropriately:

  .. code-block:: python

      def update_prefetch_count(channel, new_value):
          channel.basic_qos(
              0, new_value,
              not channel.connection.client.qos_behavior_matches_spec,
          )

- Users of :mod:`librabbitmq` are encouraged to upgrade to
  librabbitmq 1.5.0.

  The ``kombu[librabbitmq]`` extra has been updated to depend on this
  version.

- Pools: Now takes transport options into account when comparing
  connections (Issue #333).

- MongoDB: Fixes Python 3 compatibility.

- Async: select: Ignore socket errors when attempting to unregister
  handles from the loop.

- Pidbox: Can now be configured to use a serializer other than json, by
  specifying a serializer argument to :class:`~kombu.pidbox.Mailbox`.

  Contributed by Dmitry Malinovsky.
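  For instance (a minimal sketch; the namespace name is illustrative):

  .. code-block:: python

      from kombu.pidbox import Mailbox

      mailbox = Mailbox('worker', serializer='pickle')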
- Message decompression now works with Python 3.

  Fix contributed by Adam Gaca.

.. _version-3.0.14:

3.0.14
======
:release-date: 2014-03-19 07:00 P.M UTC
:release-by: Ask Solem

- **MongoDB**: Now endures a connection failover (Issue #123).

  Fix contributed by Alex Koshelev.

- **MongoDB**: Fixed ``KeyError`` when a replica set member is removed.

  Also fixes celery#971 and celery/#898.

  Fix contributed by Alex Koshelev.

- **MongoDB**: Fixed MongoDB broadcast cursor re-initialization bug.

  Fix contributed by Alex Koshelev.

- **Async**: Fixed bug in lax semaphore implementation where in some
  usage patterns the limit was not honored correctly.

  Fix contributed by Ionel Cristian Mărieș.

- **Redis**: Fixed problem with fanout when using Python 3 (Issue #324).

- **Redis**: Fixed ``AttributeError`` from attempting to close a
  non-existing connection (Issue #320).

.. _version-3.0.13:

3.0.13
======
:release-date: 2014-03-03 04:00 P.M UTC
:release-by: Ask Solem

- Redis: Fixed serious race condition that could lead to data loss.

  The delivery tags were accidentally set to be an incremental number
  local to the channel, but the delivery tags need to be globally unique
  so that a message can not overwrite an older message in the backup
  store.

  This change is not backwards incompatible and you are encouraged to
  update all your systems using a previous version as soon as possible.

- Now depends on :mod:`amqp` 1.4.4.

- Pidbox: Now makes sure message encoding errors are handled by default,
  so that a custom error handler does not need to be specified.

- Redis: The fanout exchange can now use AMQP patterns to route and
  filter messages.

  This change is backwards incompatible and must be enabled with the
  ``fanout_patterns`` transport option:

  .. code-block:: pycon

      >>> conn = kombu.Connection('redis://', transport_options={
      ...     'fanout_patterns': True,
      ... })

  When enabled the exchange will work like an amqp topic exchange if the
  binding key is a pattern.

  This is planned to be default behavior in the future.

- Redis: Fixed ``cycle`` no such attribute error.

.. _version-3.0.12:

3.0.12
======
:release-date: 2014-02-09 03:50 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.3.

- Fixes Python 3.4 logging incompatibility (Issue #311).

- Redis: Now properly handles unknown pub/sub messages.

  Fix contributed by Sam Stavinoha.

- amqplib: Fixed bug where more bytes were requested from the socket than
  necessary.

  Fix contributed by Ionel Cristian Mărieș.

.. _version-3.0.11:

3.0.11
======
:release-date: 2014-02-03 05:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.2.

- Now always trusts messages of type `application/data` and
  `application/text` or which have an unspecified content type
  (Issue #306).

- Compression errors are now handled as decode errors and will trigger
  the ``Consumer.on_decode_error`` callback if specified.

- New ``kombu.Connection.get_heartbeat_interval()`` method that can be
  used to access the negotiated heartbeat value.

- `kombu.common.oid_for` no longer uses the MAC address of the host, but
  instead uses a process-wide UUID4 as a node id.

  This avoids a call to `uuid.getnode()` at module scope.

- Hub.add: Now normalizes registered fileno.

  Contributed by Ionel Cristian Mărieș.

- SQS: Fixed bug where the prefetch count limit was not respected.

.. _version-3.0.10:

3.0.10
======
:release-date: 2014-01-17 05:40 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.1.

- ``maybe_declare`` now raises a "recoverable connection error" if the
  channel is disconnected instead of a :exc:`ChannelError` so that the
  operation can be retried.

- Redis: ``Consumer.cancel()`` is now thread safe.

  This fixes an issue when using gevent/eventlet and a message is handled
  after the consumer is canceled resulting in a "message for queue
  without consumers" error.

- Retry operations would not always respect the interval_start value when
  calculating the time to sleep for (Issue #303).

  Fix contributed by Antoine Legrand.

- Timer: Fixed "unhashable type" error on Python 3.

- Hub: Do not attempt to unregister operations on an already closed
  poller instance.

.. _version-3.0.9:

3.0.9
=====
:release-date: 2014-01-13 05:30 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.4.0.

- Redis: Basic cancel for fanout based queues now sends a corresponding
  ``UNSUBSCRIBE`` command to the server.

  This fixes an issue with pidbox where reply messages could be received
  after the consumer was canceled, giving the ``"message to queue without
  consumers"`` error.

- MongoDB: Improved connection string and options handling
  (Issue #266 + Issue #120).

  Contributed by Alex Koshelev.

- SQS: Limit the number of messages when receiving in batch to 10.

  This is a hard limit enforced by Amazon so the sqs transport must not
  exceed this value.

  Fix contributed by Eric Reynolds.

- ConsumerMixin: ``consume`` now checks heartbeat every time the socket
  times out.

  Contributed by Dustin J. Mitchell.

- Retry Policy: A max retries of 0 did not retry forever.

  Fix contributed by Antoine Legrand.

- Simple: If passing a Queue object the simple utils will now take
  default routing key from that queue.

  Contributed by Fernando Jorge Mota.

- ``repr(producer)`` no longer evaluates the underlying channel.

- Redis: The map of Redis error classes are now exposed at the module
  level using the :func:`kombu.transport.redis.get_redis_error_classes`
  function.

- Async: ``Hub.close`` now sets ``.poller`` to None.

.. _version-3.0.8:

3.0.8
=====
:release-date: 2013-12-16 05:00 P.M UTC
:release-by: Ask Solem

- Serializer: loads and dumps now wraps exceptions raised into
  :exc:`~kombu.exceptions.DecodeError` and
  :exc:`kombu.exceptions.EncodeError` respectively.

  Contributed by Ionel Cristian Mărieș.
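  A small sketch of what this wrapping looks like in practice (the broken
  payload is contrived for the example):

  .. code-block:: python

      from kombu.exceptions import DecodeError
      from kombu.serialization import loads

      try:
          loads('{"broken json', 'application/json', 'utf-8')
      except DecodeError as exc:
          print('could not decode message: %r' % (exc,))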
- Redis: Would attempt to read from the wrong connection if a
  select/epoll/kqueue exception event happened.

  Fix contributed by Michael Nelson.

- Redis: Disabling ack emulation now works properly.

  Fix contributed by Michael Nelson.

- Redis: :exc:`IOError` and :exc:`OSError` are now treated as recoverable
  connection errors.

- SQS: Improved performance by reading messages in bulk.

  Contributed by Matt Wise.

- Connection Pool: Attempting to acquire from a closed pool will now
  raise :class:`RuntimeError`.

.. _version-3.0.7:

3.0.7
=====
:release-date: 2013-12-02 04:00 P.M UTC
:release-by: Ask Solem

- Fixes Python 2.6 compatibility.

- Redis: Fixes 'bad file descriptor' issue.

.. _version-3.0.6:

3.0.6
=====
:release-date: 2013-11-21 04:50 P.M UTC
:release-by: Ask Solem

- Timer: No longer attempts to hash keyword arguments (Issue #275).

- Async: Did not account for the long type for file descriptors.

  Fix contributed by Fabrice Rabaute.

- PyPy: kqueue support was broken.

- Redis: Bad pub/sub payloads no longer crashes the consumer.

- Redis: Unix socket URLs can now specify a virtual host by including it
  as a query parameter.

  Example URL specifying a virtual host using database number 3:

  .. code-block:: text

      redis+socket:///tmp/redis.sock?virtual_host=3

- ``kombu.VERSION`` is now a named tuple.

.. _version-3.0.5:

3.0.5
=====
:release-date: 2013-11-15 11:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.3.3.

- Redis: Fixed Python 3 compatibility problem (Issue #270).

- MongoDB: Fixed problem with URL parsing when authentication used.

  Fix contributed by dongweiming.

- pyamqp: Fixed small issue when publishing the message and the property
  dictionary was set to None.

  Fix contributed by Victor Garcia.

- Fixed problem in ``repr(LaxBoundedSemaphore)``.

  Fix contributed by Antoine Legrand.

- Tests now passing on Python 3.3.

.. _version-3.0.4:

3.0.4
=====
:release-date: 2013-11-08 01:00 P.M UTC
:release-by: Ask Solem

- common.QoS: ``decrement_eventually`` now makes sure the value does not
  go below 1 if a prefetch count is enabled.

.. _version-3.0.3:

3.0.3
=====
:release-date: 2013-11-04 03:00 P.M UTC
:release-by: Ask Solem

- SQS: Properly reverted patch that caused delays between messages.

  Contributed by James Saryerwinnie.

- select: Clear all registered fds on poller.close.

- Eventloop: unregister if EBADF raised.

.. _version-3.0.2:

3.0.2
=====
:release-date: 2013-10-29 02:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` version 1.3.2.

- select: Fixed problem where unregister did not properly remove the fd.

.. _version-3.0.1:

3.0.1
=====
:release-date: 2013-10-24 04:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` version 1.3.1.

- Redis: New option ``fanout_keyprefix``

  This transport option is recommended for all users as it ensures that
  broadcast (fanout) messages sent are only seen by the current virtual
  host:

  .. code-block:: python

      Connection('redis://', transport_options={'fanout_keyprefix': True})

  However, enabling this means that you cannot send or receive messages
  from older Kombu versions so make sure all of your participants are
  upgraded and have the transport option enabled.

  This will be the default behavior in Kombu 4.0.

- Distribution: Removed file ``requirements/py25.txt``.
- MongoDB: Now disables ``auto_start_request``.

- MongoDB: Enables ``use_greenlets`` if eventlet/gevent used.

- Pidbox: Fixes problem where expires header was None, which is a value
  not supported by the amq protocol.

- ConsumerMixin: New ``consumer_context`` method for starting the
  consumer without draining events.

.. _version-3.0.0:

3.0.0
=====
:release-date: 2013-10-14 04:00 P.M BST
:release-by: Ask Solem

- Now depends on :mod:`amqp` version 1.3.

- No longer supports Python 2.5.

  The minimum Python version supported is now Python 2.6.0 for Python 2,
  and Python 3.3 for Python 3.

- Dual codebase supporting both Python 2 and 3.

  No longer using ``2to3``, making it easier to maintain support for both
  versions.

- pickle, yaml and msgpack deserialization is now disabled by default.

  This means that Kombu will by default refuse to handle any content type
  other than json.

  Pickle is known to be a security concern as it will happily load any
  object that is embedded in a pickle payload, and payloads can be
  crafted to do almost anything you want. The default serializer in Kombu
  is json but it also supports a number of other serialization formats
  that it will evaluate if received: including pickle.

  It was always assumed that users were educated about the security
  implications of pickle, but in hindsight we don't think users should be
  expected to secure their services if we have the ability to be secure
  by default.

  By disabling any content type that the user did not explicitly want
  enabled we ensure that the user must be conscious when they add pickle
  as a serialization format to support.

  The other built-in serializers (yaml and msgpack) are also disabled
  even though they aren't considered insecure [#f1]_ at this point.
  Instead they're disabled so that if a security flaw is found in one of
  these libraries in the future, you will only be affected if you have
  explicitly enabled them.

  To have your consumer accept formats other than json you have to
  explicitly add the wanted formats to a white-list of accepted content
  types:

  .. code-block:: pycon

      >>> c = Consumer(conn, accept=['json', 'pickle', 'msgpack'])

  or when using synchronous access:

  .. code-block:: pycon

      >>> msg = queue.get(accept=['json', 'pickle', 'msgpack'])

  The ``accept`` argument was first supported for consumers in version
  2.5.10, and first supported by ``Queue.get`` in version 2.5.15 so to
  stay compatible with previous versions you can enable the previous
  behavior:

  >>> from kombu import enable_insecure_serializers
  >>> enable_insecure_serializers()

  But note that this has global effect, so be very careful should you
  use it.

  .. rubric:: Footnotes

  .. [#f1] The PyYAML library has a :func:`yaml.load` function with some
      of the same security implications as pickle, but Kombu uses the
      :func:`yaml.safe_load` function which is not known to be affected.

- kombu.async: Experimental event loop implementation.

  This code was previously in Celery but was moved here to make it easier
  for async transport implementations.

  The API is meant to match the Tulip API which will be included in
  Python 3.4 as the ``asyncio`` module. It's not a complete
  implementation obviously, but the goal is that it will be easy to
  change to it once that is possible.

- Utility function ``kombu.common.ipublish`` has been removed.

  Use ``Producer(..., retry=True)`` instead.

- Utility function ``kombu.common.isend_reply`` has been removed.

  Use ``send_reply(..., retry=True)`` instead.

- ``kombu.common.entry_to_queue`` and ``kombu.messaging.entry_to_queue``
  have been removed.
  Use ``Queue.from_dict(name, **options)`` instead.

- Redis: Messages are now restored at the end of the list.

  Contributed by Mark Lavin.

- ``StdConnectionError`` and ``StdChannelError`` are removed and
  :exc:`amqp.ConnectionError` and :exc:`amqp.ChannelError` are used
  instead.

- Message object implementation has moved to
  :class:`kombu.message.Message`.

- Serialization: Renamed functions encode/decode to
  :func:`~kombu.serialization.dumps` and
  :func:`~kombu.serialization.loads`.

  For backward compatibility the old names are still available as
  aliases.

- The ``kombu.log.anon_logger`` function has been removed.

  Use :func:`~kombu.log.get_logger` instead.

- ``queue_declare`` now returns namedtuple with ``queue``,
  ``message_count``, and ``consumer_count`` fields.

- LamportClock: Can now set lock class.

- :mod:`kombu.utils.clock`: Utilities for ordering events added.

- :class:`~kombu.simple.SimpleQueue` now allows you to override the
  exchange type used.

  Contributed by Vince Gonzales.

- Zookeeper transport updated to support new changes in the :mod:`kazoo`
  library.

  Contributed by Mahendra M.

- pyamqp/librabbitmq: Transport options are now forwarded as keyword
  arguments to the underlying connection (Issue #214).

- Transports may now distinguish between recoverable and irrecoverable
  connection and channel errors.

- ``kombu.utils.Finalize`` has been removed: Use
  :mod:`multiprocessing.util.Finalize` instead.

- Memory transport now supports the fanout exchange type.

  Contributed by Davanum Srinivas.

- Experimental new `Pyro`_ transport (:mod:`kombu.transport.pyro`).

  Contributed by Tommie McAfee.

.. _`Pyro`: http://pythonhosted.org/Pyro

- Experimental new `SoftLayer MQ`_ transport (:mod:`kombu.transport.SLMQ`).

  Contributed by Kevin McDonald.

.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue

- Eventio: Kqueue breaks in subtle ways so select is now used instead.

- SQLAlchemy transport: Can now specify table names using the
  ``queue_tablename`` and ``message_tablename`` transport options.

  Contributed by Ryan Petrello.

Redis transport: Now supports using local UNIX sockets to communicate
with the Redis server (Issue #1283).

To connect using a UNIX socket you have to use the ``redis+socket``
URL-prefix: ``redis+socket:///tmp/redis.sock``.

This functionality was merged from the `celery-redis-unixsocket`_
project.

Contributed by Maxime Rouyrre.

ZeroMQ transport: drain_events now supports timeout.

Contributed by Jesper Thomschütz.

.. _`celery-redis-unixsocket`:
    https://github.com/piquadrat/celery-redis-unixsocket

.. _version-2.5.16:

2.5.16
======
:release-date: 2013-10-04 03:30 P.M BST
:release-by: Ask Solem

- Python 3: Fixed problem with dependencies not being installed.

.. _version-2.5.15:

2.5.15
======
:release-date: 2013-10-04 03:30 P.M BST
:release-by: Ask Solem

- Declaration cache: Now only keeps hash of declaration so that it does
  not keep a reference to the channel.

- Declaration cache: Now respects ``entity.can_cache_declaration``
  attribute.

- Fixes Python 2.5 compatibility.

- Fixes tests after python-msgpack changes.

- ``Queue.get``: Now supports ``accept`` argument.

.. _version-2.5.14:

2.5.14
======
:release-date: 2013-08-23 05:00 P.M BST
:release-by: Ask Solem

- safe_str did not work properly resulting in :exc:`UnicodeDecodeError`
  (Issue #248).

.. _version-2.5.13:

2.5.13
======
:release-date: 2013-08-16 04:00 P.M BST
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.0.13

- Fixed typo in Django functional tests.
- safe_str now returns Unicode in Python 2.x.

  Fix contributed by Germán M. Bravo.

- amqp: Transport options are now merged with arguments supplied to the
  connection.

- Tests no longer depend on distribute, which was deprecated and merged
  back into setuptools.

  Fix contributed by Sascha Peilicke.

- ConsumerMixin now also restarts on channel related errors.

  Fix contributed by Corentin Ardeois.

.. _version-2.5.12:

2.5.12
======
:release-date: 2013-06-28 03:30 P.M BST
:release-by: Ask Solem

- Redis: Ignore errors about keys missing in the round-robin cycle.

- Fixed test suite errors on Python 3.

- Fixed msgpack test failures.

.. _version-2.5.11:

2.5.11
======
:release-date: 2013-06-25 02:30 P.M BST
:release-by: Ask Solem

- Now depends on amqp 1.0.12 (Py3 compatibility issues).

- MongoDB: Removed cause of a "database name in URI is being ignored"
  warning.

  Fix by Flavio Percoco Premoli.

- Adds ``passive`` option to :class:`~kombu.Exchange`.

  Setting this flag means that the exchange will not be declared by
  kombu, but that it must exist already (or an exception will be raised).

  Contributed by Rafal Malinowski.

- Connection.info() now gives the current hostname and not the list of
  available hostnames.

  Fix contributed by John Shuping.

- pyamqp: Transport options are now forwarded as kwargs to
  ``amqp.Connection``.

- librabbitmq: Transport options are now forwarded as kwargs to
  ``librabbitmq.Connection``.

- librabbitmq: Now raises :exc:`NotImplementedError` if SSL is enabled.

  The librabbitmq library does not support ssl, but you can use stunnel
  or change to the ``pyamqp://`` transport instead.

  Fix contributed by Dan LaMotte.

- librabbitmq: Fixed a cyclic reference at connection close.

- eventio: select implementation now removes bad file descriptors.

- eventio: Fixed Py3 compatibility problems.

- Functional tests added for py-amqp and librabbitmq transports.

- Resource.force_close_all no longer uses a mutex.

- Pidbox: Now ignores `InconsistencyError` when sending replies, as this
  error simply means that the client may no longer be alive.

- Adds new :meth:`Connection.collect <~kombu.Connection.collect>` method,
  that can be used to clean up after connections without I/O.

- ``queue_bind`` is no longer called for queues bound to the "default
  exchange" (Issue #209).

  Contributed by Jonathan Halcrow.

- The max_retries setting for retries was not respected correctly (off
  by one).

.. _version-2.5.10:

2.5.10
======
:release-date: 2013-04-11 06:10 P.M BST
:release-by: Ask Solem

Note about upcoming changes for Kombu 3.0
-----------------------------------------

Kombu 3 consumers will no longer accept pickle/yaml or msgpack by
default, and you will have to explicitly enable untrusted deserializers
either globally using :func:`kombu.enable_insecure_serializers`, or using
the ``accept`` argument to :class:`~kombu.Consumer`.

Changes
-------

- New utility function to disable/enable untrusted serializers.

  - :func:`kombu.disable_insecure_serializers`
  - :func:`kombu.enable_insecure_serializers`.

- Consumer: ``accept`` can now be used to specify a whitelist of content
  types to accept.

  If the accept whitelist is set and a message is received with a content
  type that is not in the whitelist then a
  :exc:`~kombu.exceptions.ContentDisallowed` exception is raised. Note
  that this error can be handled by the already existing
  `on_decode_error` callback.

  Examples:
  .. code-block:: python

      Consumer(accept=['application/json'])
      Consumer(accept=['pickle', 'json'])

- Now depends on amqp 1.0.11

- pidbox: Mailbox now supports the ``accept`` argument.

- Redis: More friendly error for when keys are missing.

- Connection URLs: The parser did not work well when there were multiple
  '+' tokens.

.. _version-2.5.9:

2.5.9
=====
:release-date: 2013-04-08 05:07 P.M BST
:release-by: Ask Solem

- Pidbox: Now warns if there are multiple nodes consuming from the same
  pidbox.

- Adds :attr:`Queue.on_declared`

  A callback to be called when the queue is declared, with signature
  ``(name, messages, consumers)``.

- Now uses fuzzy matching to suggest alternatives to typos in transport
  names.

- SQS: Adds new transport option ``queue_prefix``.

  Contributed by j0hnsmith.

- pyamqp: No longer overrides verify_connection.

- SQS: Now specifies the ``driver_type`` and ``driver_name`` attributes.

  Fix contributed by Mher Movsisyan.

- Fixed bug with ``kombu.utils.retry_over_time`` when no errback
  specified.

.. _version-2.5.8:

2.5.8
=====
:release-date: 2013-03-21 04:00 P.M UTC
:release-by: Ask Solem

- Now depends on :mod:`amqp` 1.0.10 which fixes a Python 3 compatibility
  error.

- Redis: Fixed a possible race condition (Issue #171).

- Redis: Ack emulation/visibility_timeout can now be disabled using a
  transport option.

  Ack emulation adds quite a lot of overhead to ensure data is safe even
  in the event of an unclean shutdown.

  If data loss does not worry you there is now an `ack_emulation`
  transport option you can use to disable it:

  .. code-block:: python

      Connection('redis://', transport_options={'ack_emulation': False})

- SQS: Fixed :mod:`boto` v2.7 compatibility (Issue #207).

- Exchange: Should not try to re-declare default exchange (``""``)
  (Issue #209).

- SQS: Long polling is now disabled by default as it was not implemented
  correctly, resulting in long delays between receiving messages
  (Issue #202).

- Fixed Python 2.6 incompatibility depending on ``exc.errno`` being
  available.

  Fix contributed by Ephemera.

.. _version-2.5.7:

2.5.7
=====
:release-date: 2013-03-08 01:00 P.M UTC
:release-by: Ask Solem

- Now depends on amqp 1.0.9

- Redis: A regression in 2.5.6 caused the redis transport to ignore
  options set in ``transport_options``.

- Redis: New ``socket_timeout`` transport option.

- Redis: ``InconsistencyError`` is now regarded as a recoverable error.

- Resource pools: Will no longer attempt to release resource that was
  never acquired.

- MongoDB: Now supports the ``ssl`` option.

  Contributed by Sebastian Pawlus.

.. _version-2.5.6:

2.5.6
=====
:release-date: 2013-02-08 01:00 P.M UTC
:release-by: Ask Solem

- Now depends on amqp 1.0.8 which works around a bug found on some
  Python 2.5 installations where 2**32 overflows to 0.

.. _version-2.5.5:

2.5.5
=====
:release-date: 2013-02-07 05:00 P.M UTC
:release-by: Ask Solem

SQS: Now supports long polling (Issue #176).

The polling interval default has been changed to 0 and a new transport
option (``wait_time_seconds``) has been added. This parameter specifies
how long to wait for a message from SQS, and defaults to 20 seconds,
which is the maximum value currently allowed by Amazon SQS.

Contributed by James Saryerwinnie.

- SQS: Now removes unpickleable fields before restoring messages.

- Consumer.__exit__ now ignores exceptions occurring while canceling the
  consumer.

- Virtual: Routing keys can now consist of characters also used in
  regular expressions (e.g. parens) (Issue #194).

- Virtual: Fixed compression header when restoring messages.
  Fix contributed by Alex Koshelev.

- Virtual: ack/reject/requeue now works while using ``basic_get``.

- Virtual: Message.reject is now supported by virtual transports (requeue
  depends on individual transport support).

- Fixed typo in hack used for static analyzers.

  Fix contributed by Basil Mironenko.

.. _version-2.5.4:

2.5.4
=====
:release-date: 2012-12-10 12:35 P.M UTC
:release-by: Ask Solem

- Fixed problem with connection clone and multiple URLs (Issue #182).

  Fix contributed by Dane Guempel.

- zeromq: Now compatible with libzmq 3.2.x.

  Fix contributed by Andrey Antukh.

- Fixed Python 3 installation problem (Issue #187).

.. _version-2.5.3:

2.5.3
=====
:release-date: 2012-11-29 12:35 P.M UTC
:release-by: Ask Solem

- Pidbox: Fixed compatibility with Python 2.6.

.. _version-2.5.2:

2.5.2
=====
:release-date: 2012-11-29 12:35 P.M UTC
:release-by: Ask Solem

- [Redis] Fixed connection leak and added a new 'max_connections'
  transport option.

.. _version-2.5.1:

2.5.1
=====
:release-date: 2012-11-28 12:45 P.M UTC
:release-by: Ask Solem

- Fixed bug where return value of Queue.as_dict could not be serialized
  with JSON (Issue #177).

.. _version-2.5.0:

2.5.0
=====
:release-date: 2012-11-27 04:00 P.M UTC
:release-by: Ask Solem

- `py-amqp`_ is now the new default transport, replacing ``amqplib``.

  The new `py-amqp`_ library is a fork of amqplib started with the
  following goals:

  - Uses AMQP 0.9.1 instead of 0.8
  - Support for heartbeats (Issue #79 + Issue #131)
  - Automatically revives channels on channel errors.
  - Support for all RabbitMQ extensions

    - Consumer Cancel Notifications (Issue #131)
    - Publisher Confirms (Issue #131).
    - Exchange-to-exchange bindings: ``exchange_bind`` /
      ``exchange_unbind``.

  - API compatible with :mod:`librabbitmq` so that it can be used as a
    pure-python replacement in environments where rabbitmq-c cannot be
    compiled. librabbitmq will be updated to support all the same
    features as py-amqp.

- Support for using multiple connection URL's for failover.

  The first argument to :class:`~kombu.Connection` can now be a list of
  connection URLs:

  .. code-block:: python

      Connection(['amqp://foo', 'amqp://bar'])

  or it can be a single string argument with several URLs separated by
  semicolon:

  .. code-block:: python

      Connection('amqp://foo;amqp://bar')

  There is also a new keyword argument ``failover_strategy`` that defines
  how :meth:`~kombu.Connection.ensure_connection`/
  :meth:`~kombu.Connection.ensure`/:meth:`kombu.Connection.autoretry`
  will reconnect in the event of connection failures.

  The default reconnection strategy is ``round-robin``, which will simply
  cycle through the list forever, and there's also a ``shuffle`` strategy
  that will select random hosts from the list.

  Custom strategies can also be used, in that case the argument must be a
  generator yielding the URL to connect to.

  Example:

  .. code-block:: python

      Connection('amqp://foo;amqp://bar')

- Now supports PyDev, PyCharm, pylint and other static code analysis
  tools.

- :class:`~kombu.Queue` now supports multiple bindings.

  You can now have multiple bindings in the same queue by having the
  second argument be a list:

  .. code-block:: python

      from kombu import binding, Exchange, Queue

      Queue('name', [
          binding(Exchange('E1'), routing_key='foo'),
          binding(Exchange('E1'), routing_key='bar'),
          binding(Exchange('E2'), routing_key='baz'),
      ])

  To enable this, helper methods have been added:

  - :meth:`~kombu.Queue.bind_to`
  - :meth:`~kombu.Queue.unbind_from`

  Contributed by Rumyana Neykova.
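  A short sketch of the helper methods on a queue bound to a channel (the
  queue and exchange names are illustrative):

  .. code-block:: python

      queue = Queue('name', Exchange('E1'), routing_key='foo')(connection)
      queue.declare()

      queue.bind_to(Exchange('E2'), routing_key='baz')      # add a binding
      queue.unbind_from(Exchange('E2'), routing_key='baz')  # remove it again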
- Custom serializers can now be registered using Setuptools entry-points.

  See :ref:`serialization-entrypoints`.

- New :class:`kombu.common.QoS` class used as a thread-safe way to manage
  changes to a consumer or channels prefetch_count.

  This was previously an internal class used in Celery now moved to the
  :mod:`kombu.common` module.

- Consumer now supports a ``on_message`` callback that can be used to
  process raw messages (not decoded).

  Other callbacks specified using the ``callbacks`` argument, and the
  ``receive`` method will not be called when an ``on_message`` callback
  is present.

- New utility :func:`kombu.common.ignore_errors` ignores connection and
  channel errors.

  Must only be used for cleanup actions at shutdown or on connection
  loss.

- Support for exchange-to-exchange bindings.

  The :class:`~kombu.Exchange` entity gained ``bind_to`` and
  ``unbind_from`` methods:

  .. code-block:: python

      e1 = Exchange('A')(connection)
      e2 = Exchange('B')(connection)

      e2.bind_to(e1, routing_key='rkey', arguments=None)
      e2.unbind_from(e1, routing_key='rkey', arguments=None)

  This is currently only supported by the ``pyamqp`` transport.

  Contributed by Rumyana Neykova.

.. _version-2.4.10:

2.4.10
======
:release-date: 2012-11-22 06:00 P.M UTC
:release-by: Ask Solem

- The previous versions connection pool changes broke Redis support so
  that it would always connect to localhost (default setting) no matter
  what connection parameters were provided (Issue #176).

.. _version-2.4.9:

2.4.9
=====
:release-date: 2012-11-21 03:00 P.M UTC
:release-by: Ask Solem

- Redis: Fixed race condition that could occur while trying to restore
  messages (Issue #171).

  Fix contributed by Ollie Walsh.

- Redis: Each channel is now using a specific connection pool instance,
  which is disconnected on connection failure.

- ProducerPool: Fixed possible dead-lock in the acquire method.

- ProducerPool: ``force_close_all`` no longer tries to call the
  non-existent ``Producer._close``.

- librabbitmq: Now implements ``transport.verify_connection`` so that
  connection pools will not give back connections that are no longer
  working.

- New and better ``repr()`` for Queue and Exchange objects.

- Python 3: Fixed problem with running the unit test suite.

- Python 3: Fixed problem with JSON codec.

.. _version-2.4.8:

2.4.8
=====
:release-date: 2012-11-02 05:00 P.M UTC
:release-by: Ask Solem

- Redis: Improved fair queue cycle implementation (Issue #166).

  Contributed by Kevin McCarthy.

- Redis: Unacked message restore limit is now unlimited by default.

  Also, the limit can now be configured using the
  ``unacked_restore_limit`` transport option:

  .. code-block:: python

      Connection('redis://', transport_options={
          'unacked_restore_limit': 100,
      })

  A limit of 100 means that the consumer will restore at most 100
  messages at each pass.

- Redis: Now uses a mutex to ensure only one consumer restores messages
  at a time.

  The mutex expires after 5 minutes by default, but can be configured
  using the ``unacked_mutex_expire`` transport option.

- LamportClock.adjust now returns the new clock value.

- Heartbeats can now be specified in URLs.

  Fix contributed by Mher Movsisyan.

- Kombu can now be used with PyDev, PyCharm and other static analysis
  tools.

- Fixes problem with msgpack on Python 3 (Issue #162).

  Fix contributed by Jasper Bryant-Greene.

- amqplib: Fixed bug with timeouts when SSL is used in non-blocking mode.

  Fix contributed by Mher Movsisyan.
.. _version-2.4.7:

2.4.7
=====
:release-date: 2012-09-18 03:00 P.M BST
:release-by: Ask Solem

- Virtual: Unknown exchanges now default to 'direct' when sending a
  message.

- MongoDB: Fixed memory leak when merging keys stored in the db
  (Issue #159)

  Fix contributed by Michael Korbakov.

- MongoDB: Better index for MongoDB transport (Issue #158).

  This improvement will create a new compound index for queue and _id in
  order to be able to use both indexed fields for getting a new message
  (using queue field) and sorting by _id.

  It'll be necessary to manually delete the old index from the
  collection.

  Improvement contributed by rmihael.

.. _version-2.4.6:

2.4.6
=====
:release-date: 2012-09-12 03:00 P.M BST
:release-by: Ask Solem

- Adds additional compatibility dependencies:

  - Python <= 2.6:

    - importlib
    - ordereddict

  - Python <= 2.5

    - simplejson

.. _version-2.4.5:

2.4.5
=====
:release-date: 2012-08-30 03:36 P.M BST
:release-by: Ask Solem

- Last version broke installation on PyPy and Jython due to test
  requirements clean-up.

.. _version-2.4.4:

2.4.4
=====
:release-date: 2012-08-29 04:00 P.M BST
:release-by: Ask Solem

- amqplib: Fixed a bug with asynchronously reading large messages.

- pyamqp: Now requires amqp 0.9.3

- Cleaned up test requirements.

.. _version-2.4.3:

2.4.3
=====
:release-date: 2012-08-25 10:30 P.M BST
:release-by: Ask Solem

- Fixed problem with amqp transport alias (Issue #154).

.. _version-2.4.2:

2.4.2
=====
:release-date: 2012-08-24 05:00 P.M BST
:release-by: Ask Solem

- Having an empty transport name broke in 2.4.1.

.. _version-2.4.1:

2.4.1
=====
:release-date: 2012-08-24 04:00 P.M BST
:release-by: Ask Solem

- Redis: Fixed race condition that could cause the consumer to crash
  (Issue #151), often leading to the error message ``"could not convert
  string to float"``.

- Connection retry could cause an infinite loop (Issue #145).

- The ``amqp`` alias is now resolved at runtime, so that eventlet
  detection works even if patching was done later.

.. _version-2.4.0:

2.4.0
=====
:release-date: 2012-08-17 08:00 P.M BST
:release-by: Ask Solem

- New experimental ZeroMQ transport.

.. code-block:: pycon

    >>> conn = Connection('pyamqp://guest:guest@localhost//')

The ``pyamqp://`` transport will be the default fallback transport in
Kombu version 3.0, when :mod:`librabbitmq` is not installed, and
librabbitmq will also be updated to support the same features.

- Connection now supports heartbeat argument.

  If enabled you must make sure to manually maintain heartbeats by
  calling the ``Connection.heartbeat_check`` at twice the rate of the
  specified heartbeat interval.

  E.g. if you have ``Connection(heartbeat=10)``, then you must call
  ``Connection.heartbeat_check()`` every 5 seconds.

  If the server has not sent heartbeats at a suitable rate then the
  heartbeat check method must raise an error that is listed in
  ``Connection.connection_errors``.

  The attribute ``Connection.supports_heartbeats`` has been added for
  the ability to inspect if a transport supports heartbeats or not.

  Calling ``heartbeat_check`` on a transport that does not support
  heartbeats results in a noop operation.
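  A sketch of maintaining heartbeats manually, matching the rates in the
  example above (error handling is omitted for brevity):

  .. code-block:: python

      import time

      from kombu import Connection

      conn = Connection('pyamqp://', heartbeat=10)
      conn.connect()

      while True:
          # should run at twice the heartbeat interval
          conn.heartbeat_check()
          time.sleep(5)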
- SQS: Fixed bug with invalid characters in queue names.

  Fix contributed by Zach Smith.

- utils.reprcall: Fixed typo where kwargs argument was an empty tuple by
  default, and not an empty dict.

.. _version-2.2.6:

2.2.6
=====
:release-date: 2012-07-10 05:00 P.M BST
:release-by: Ask Solem

- Adds ``kombu.messaging.entry_to_queue`` for compat with previous
  versions.

.. _version-2.2.5:

2.2.5
=====
:release-date: 2012-07-10 05:00 P.M BST
:release-by: Ask Solem

- Pidbox: Now sets queue expire at 10 seconds for reply queues.

- EventIO: Now ignores ``ValueError`` raised by epoll unregister.

- MongoDB: Fixes Issue #142

  Fix by Flavio Percoco Premoli.

.. _version-2.2.4:

2.2.4
=====
:release-date: 2012-07-05 04:00 P.M BST
:release-by: Ask Solem

- Support for msgpack-python 0.2.0 (Issue #143)

  The latest msgpack version no longer supports Python 2.5, so if you're
  still using that you need to depend on an earlier msgpack-python
  version.

  Fix contributed by Sebastian Insua.

- :func:`~kombu.common.maybe_declare` no longer caches entities with the
  ``auto_delete`` flag set.

- New experimental filesystem transport.

  Contributed by Bobby Beever.

- Virtual Transports: Now support anonymous queues and exchanges.

.. _version-2.2.3:

2.2.3
=====
:release-date: 2012-06-24 05:00 P.M BST
:release-by: Ask Solem

- ``BrokerConnection`` now renamed to ``Connection``.

  The name ``Connection`` has been an alias for a very long time, but now
  the rename is official in the documentation as well.

  The Connection alias has been available since version 1.1.3, and
  ``BrokerConnection`` will still work and is not deprecated.

- ``Connection.clone()`` now works for the sqlalchemy transport.

- :func:`kombu.common.eventloop`, :func:`kombu.utils.uuid`, and
  :func:`kombu.utils.url.parse_url` can now be imported from the
  :mod:`kombu` module directly.

- Pidbox transport callback ``after_reply_message_received`` now happens
  in a finally block.

- Trying to use the ``librabbitmq://`` transport will now show the right
  name in the :exc:`ImportError` if :mod:`librabbitmq` is not installed.

  The librabbitmq falls back to the older ``pylibrabbitmq`` name for
  compatibility reasons and would therefore show ``No module named
  pylibrabbitmq`` instead of librabbitmq.

.. _version-2.2.2:

2.2.2
=====
:release-date: 2012-06-22 02:30 P.M BST
:release-by: Ask Solem

- Now depends on :mod:`anyjson` 0.3.3

- Json serializer: Now passes :class:`buffer` objects directly, since
  this is supported in the latest :mod:`anyjson` version.

- Fixes blocking epoll call if timeout was set to 0.

  Fix contributed by John Watson.

- setup.py now takes requirements from the :file:`requirements/`
  directory.

- The distribution directory :file:`contrib/` is now renamed to
  :file:`extra/`

.. _version-2.2.1:

2.2.1
=====
:release-date: 2012-06-21 01:00 P.M BST
:release-by: Ask Solem

- SQS: Default visibility timeout is now 30 minutes.

  Since we have ack emulation the visibility timeout is only in effect
  if the consumer is abruptly terminated.

- retry argument to ``Producer.publish`` now works properly, when the
  declare argument is specified.

- Json serializer: didn't handle buffer objects (Issue #135).

  Fix contributed by Jens Hoffrichter.

- Virtual: Now supports passive argument to ``exchange_declare``.

- Exchange & Queue can now be bound to connections (which will use the
  default channel):

  .. code-block:: pycon

      >>> exchange = Exchange('name')
      >>> bound_exchange = exchange(connection)
      >>> bound_exchange.declare()

- ``SimpleQueue`` & ``SimpleBuffer`` can now be bound to connections
  (which will use the default channel).

- ``Connection.manager.get_bindings`` now works for librabbitmq and pika.

- Adds new transport info attributes:

  - ``Transport.driver_type``

    Type of underlying driver, e.g. "amqp", "redis", "sql".

  - ``Transport.driver_name``

    Name of library used e.g. "amqplib", "redis", "pymongo".

  - ``Transport.driver_version()``

    Version of underlying library.
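  For example (a sketch; the printed values depend on the transport in
  use):

  .. code-block:: python

      conn = Connection('pyamqp://')

      print(conn.transport.driver_type)       # e.g. 'amqp'
      print(conn.transport.driver_name)       # name of underlying library
      print(conn.transport.driver_version())  # version of that library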
.. _version-2.2.0:

2.2.0
=====
:release-date: 2012-06-07 03:10 P.M BST
:release-by: Ask Solem

.. _v220-important:

Important Notes
---------------

- The canonical source code repository has been moved to

    http://github.com/celery/kombu

- Pidbox: Exchanges used by pidbox are no longer ``auto_delete``.

    Auto delete has been described as a misfeature,
    and therefore we have disabled it.

    For RabbitMQ users the old exchanges used by pidbox must be removed;
    these are named ``mailbox_name.pidbox``
    and ``reply.mailbox_name.pidbox``.

    The following command can be used to clean up these exchanges:

    .. code-block:: text

        $ VHOST=/ URL=amqp:// python -c'import sys,kombu;[kombu.Connection(
            sys.argv[-1]).channel().exchange_delete(x)
                for x in sys.argv[1:-1]]' \
            $(sudo rabbitmqctl -q list_exchanges -p "$VHOST" \
            | grep \.pidbox | awk '{print $1}') "$URL"

    The :envvar:`VHOST` variable must be set to the target RabbitMQ
    virtual host, and :envvar:`URL` must be the AMQP URL to the server.

- The ``amqp`` transport alias will now use :mod:`librabbitmq`
  if installed.

    `py-librabbitmq`_ is a fast AMQP client for Python
    using the librabbitmq C library.

    It can be installed with:

    .. code-block:: console

        $ pip install librabbitmq

    It will not be used if the process is monkey patched by
    eventlet/gevent.

.. _`py-librabbitmq`: https://github.com/celery/librabbitmq

.. _v220-news:

News
----

- Redis: Ack emulation improvements, reducing the possibility of data loss.

    Acks are now implemented by storing a copy of the message when the
    message is consumed.  The copy is not removed until the consumer
    acknowledges or rejects it.

    This means that unacknowledged messages will be redelivered either
    when the connection is closed, or when the visibility timeout is
    exceeded.

    - Visibility timeout

        This is a timeout for acks, so that if the consumer
        does not ack the message within this time limit,
        the message is redelivered to another consumer.

        The timeout is set to one hour by default, but
        can be changed by configuring a transport option:

            >>> Connection('redis://', transport_options={
            ...     'visibility_timeout': 1800,  # 30 minutes
            ... })

    **NOTE**: Messages that have not been acked will be redelivered
    if the visibility timeout is exceeded; for Celery users
    this means that ETA/countdown tasks that are scheduled to execute
    with a time that exceeds the visibility timeout will be executed
    twice (or more).

    If you plan on using long ETA/countdowns you should tweak the
    visibility timeout accordingly:

    .. code-block:: python

        BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 18000}  # 5 hours

    Setting a long timeout means that it will take a long time
    for messages to be redelivered in the event of a power failure,
    but if that happens you could temporarily set the visibility timeout
    lower to flush out messages when you start up the systems again.

- Experimental `Apache ZooKeeper`_ transport

    More information is in the module reference:
    :mod:`kombu.transport.zookeeper`.

    Contributed by Mahendra M.

.. _`Apache ZooKeeper`: http://zookeeper.apache.org/

- Redis: Priority support.

    The message's ``priority`` field is now respected by the Redis
    transport by having multiple lists for each named queue.
    The queues are then consumed in order of priority.

    The priority field is a number in the range of 0 - 9, where
    0 is the default and highest priority.

    The priority range is collapsed into four steps by default, since it is
    unlikely that nine steps will yield more benefit than using four steps.
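    A hedged sketch of publishing a message with an explicit priority
    (the exchange name and routing key here are hypothetical;
    ``priority`` is passed as a message property):

    .. code-block:: python

        from kombu import Connection, Exchange, Producer

        exchange = Exchange('tasks')

        with Connection('redis://') as connection:
            producer = Producer(connection.default_channel)
            # 0 is the default and highest priority in this scheme.
            producer.publish({'hello': 'world'},
                             exchange=exchange,
                             routing_key='tasks',
                             declare=[exchange],
                             priority=0)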
    The number of steps can be configured by setting the
    ``priority_steps`` transport option, which must be a list
    of numbers in **sorted order**:

    .. code-block:: pycon

        >>> x = Connection('redis://', transport_options={
        ...     'priority_steps': [0, 2, 4, 6, 8, 9],
        ... })

    Priorities implemented in this way are not as reliable as
    priorities on the server side, which is why we nickname the feature
    "quasi-priorities"; **using routing is still the suggested way of
    ensuring quality of service**, as client-implemented priorities
    fall short in a number of ways, e.g. if the worker
    is busy with long running tasks, has prefetched many messages,
    or the queues are congested.

    Still, it is possible that using priorities in combination
    with routing can be more beneficial than using routing
    or priorities alone.  Experimentation and monitoring
    should be used to prove this.

    Contributed by Germán M. Bravo.

- Redis: Now cycles queues so that consuming is fair.

    This ensures that a very busy queue won't block messages
    from other queues, and ensures that all queues have
    an equal chance of being consumed from.

    This used to be the case before, but the behavior was
    accidentally changed while switching to using blocking pop.

- Redis: Queues bound to fanout exchanges with auto delete set are now
  deleted at ``channel.close``.

- amqplib: Refactored the drain_events implementation.

- Pidbox: Now uses ``connection.default_channel``.

- Pickle serialization: Can now decode buffer objects.

- Exchange/Queue declarations can now be cached even if
  the entity is non-durable.

    This is possible because the list of cached declarations
    is now kept with the connection, so that the entities
    will be redeclared if the connection is lost.

- Kombu source code now only uses one level of explicit relative imports.

.. _v220-fixes:

Fixes
-----

- eventio: Now ignores ENOENT raised by ``epoll.register``, and
  EEXIST from ``epoll.unregister``.

- eventio: kqueue now ignores :exc:`KeyError` on unregister.

- Redis: ``Message.reject`` now supports the ``requeue`` argument.

- Redis: Remove superfluous pipeline call.

    Fix contributed by Thomas Johansson.

- Redis: Now sets redelivered header for redelivered messages.

- Now always makes sure references to :func:`sys.exc_info` are removed.

- Virtual: The compression header is now removed before restoring messages.

- More tests for the SQLAlchemy backend.

    Contributed by Franck Cuny.

- Url parsing did not handle MongoDB URLs properly.

    Fix contributed by Flavio Percoco Premoli.

- Beanstalk: Ignore default tube when reserving.

    Fix contributed by Zhao Xiaohong.

Nonblocking consume support
---------------------------

librabbitmq, amqplib and redis transports can now be used non-blocking.

The interface is very manual, and only consuming messages
is non-blocking so far.

The API should not be regarded as stable or final
in any way.  It is used by Celery, which has very limited needs
at this point.  Hopefully we can introduce a proper callback-based
API later.

- ``Transport.eventmap``

    Is a map of ``fd -> callback(fileno, event)``
    to register in an eventloop.

- ``Transport.on_poll_start()``

    Is called before every call to poll.
    The poller must support ``register(fd, callback)``
    and ``unregister(fd)`` methods.

- ``Transport.on_poll_init(poller)``

    Is called when the hub is initialized.
    The poller argument must support the same interface as
    :class:`kombu.utils.eventio.poll`.

- ``Connection.ensure_connection`` now takes a callback argument
  which is called for every loop while the connection is down.
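    A minimal sketch of such a callback (assuming it is called with no
    arguments on every retry loop; the function name is hypothetical):

    .. code-block:: python

        from kombu import Connection

        def still_down():
            print('Broker still unreachable, retrying...')

        connection = Connection('amqp://guest:guest@localhost//')
        connection.ensure_connection(callback=still_down)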
- Adds ``connection.drain_nowait``.

    This is a non-blocking alternative to drain_events,
    but only supported by amqplib/librabbitmq.

- drain_events now sets ``connection.more_to_read`` if there is more
  data to read.

    This is to support eventloops where other things
    must be handled between draining events.

.. _version-2.1.8:

2.1.8
=====
:release-date: 2012-05-06 03:06 P.M BST
:release-by: Ask Solem

* Bound Exchanges/Queues are now pickleable.

* Consumer/Producer can now be instantiated without a channel,
  and only later bound using ``.revive(channel)``.

* ProducerPool now takes a ``Producer`` argument.

* :func:`~kombu.utils.fxrange` now counts forever if the
  stop argument is set to None.
  (fxrange is like xrange but for decimals).

* Auto delete support for virtual transports was incomplete
  and could lead to problems, so it was removed.

* Cached declarations (:func:`~kombu.common.maybe_declare`)
  are now bound to the underlying connection, so that entities
  are redeclared if the connection is lost.

    This also means that previously uncacheable entities
    (e.g. non-durable) can now be cached.

* compat ConsumerSet: can now specify channel.

.. _version-2.1.7:

2.1.7
=====
:release-date: 2012-04-27 06:00 P.M BST
:release-by: Ask Solem

* compat ConsumerSet now accepts an optional channel argument.

.. _version-2.1.6:

2.1.6
=====
:release-date: 2012-04-23 01:30 P.M BST
:release-by: Ask Solem

* SQLAlchemy transport was not working correctly after the URL parser
  change.

* maybe_declare now stores cached declarations per underlying connection
  instead of globally, for the rare case that data disappears from
  the broker after connection loss.

* Django: Added South migrations.

    Contributed by Joseph Crosland.

.. _version-2.1.5:

2.1.5
=====
:release-date: 2012-04-13 03:30 P.M BST
:release-by: Ask Solem

* The url parser removed more than the first leading slash (Issue #121).

* SQLAlchemy: Can now specify the url using a ``+`` separator.

    Example:

    .. code-block:: python

        Connection('sqla+mysql://localhost/db')

* Better support for anonymous queues (Issue #116).

    Contributed by Michael Barrett.

* ``Connection.as_uri`` now quotes url parts (Issue #117).

* Beanstalk: Can now set message TTR as a message property.

    Contributed by Andrii Kostenko.

.. _version-2.1.4:

2.1.4
=====
:release-date: 2012-04-03 04:00 P.M GMT
:release-by: Ask Solem

* MongoDB: URL parsing is now delegated to the pymongo library
  (Fixes Issue #103 and Issue #87).

    Fix contributed by Flavio Percoco Premoli and James Sullivan.

* SQS: A bug caused SimpleDB to be used even if sdb persistence
  was not enabled (Issue #108).

    Fix contributed by Anand Kumria.

* Django: Transaction was committed in the wrong place, causing
  data cleanup to fail (Issue #115).

    Fix contributed by Daisuke Fujiwara.

* MongoDB: Now supports replica set URLs.

    Contributed by Flavio Percoco Premoli.

* Redis: Now raises a channel error if a queue key that is currently
  being consumed from disappears.

    Fix contributed by Stephan Jaekel.

* All transport 'channel_errors' lists now include
  :exc:`kombu.exceptions.StdChannelError`.

* All kombu exceptions now inherit from a common
  :exc:`~kombu.exceptions.KombuError`.

.. _version-2.1.3:

2.1.3
=====
:release-date: 2012-03-20 03:00 P.M GMT
:release-by: Ask Solem

* Fixes Jython compatibility issues.

* Fixes Python 2.5 compatibility issues.

.. _version-2.1.2:

2.1.2
=====
:release-date: 2012-03-01 01:00 P.M GMT
:release-by: Ask Solem

* amqplib: Last version broke SSL support.
.. _version-2.1.1:

2.1.1
=====
:release-date: 2012-02-24 02:00 P.M GMT
:release-by: Ask Solem

* Connection URLs now support encoded characters.

* Fixed a case where the connection pool could not recover
  from connection loss.

    Fix contributed by Florian Munz.

* We now patch amqplib's ``__del__`` method to skip trying to close the
  socket if it is not connected, as this resulted in an annoying warning.

* Compression can now be used with binary message payloads.

    Fix contributed by Steeve Morin.

.. _version-2.1.0:

2.1.0
=====
:release-date: 2012-02-04 10:38 P.M GMT
:release-by: Ask Solem

* MongoDB: Now supports fanout (broadcast) (Issue #98).

    Contributed by Scott Lyons.

* amqplib: Now detects broken connections by using ``MSG_PEEK``.

* pylibrabbitmq: Now supports ``basic_get`` (Issue #97).

* gevent: Now always uses the ``select`` polling backend.

* pika transport: Now works with pika 0.9.5 and 0.9.6dev.

    The old pika transport (supporting 0.5.x) is now available
    as alias ``oldpika``.

    (Note: terrible latency has been experienced with the new pika
    versions, so this is still an experimental transport).

* Virtual transports: can now set the polling interval via the
  transport options (Issue #96).

    Example:

    .. code-block:: pycon

        >>> Connection('sqs://', transport_options={
        ...     'polling_interval': 5.0})

    The default interval is transport specific, but usually
    1.0s (or 5.0s for the Django database transport, which
    can also be set using the ``KOMBU_POLLING_INTERVAL`` setting).

* Adds convenience function: :func:`kombu.common.eventloop`.

.. _version-2.0.0:

2.0.0
=====
:release-date: 2012-01-15 06:34 P.M GMT
:release-by: Ask Solem

.. _v200-important:

Important Notes
---------------

.. _v200-python-compatibility:

Python Compatibility
~~~~~~~~~~~~~~~~~~~~

* No longer supports Python 2.4.

    Users of Python 2.4 can still use the 1.x series.

    The 1.x series has entered bugfix-only maintenance mode, and will
    stay that way as long as there is demand, and a willingness to
    maintain it.

.. _v200-new-transports:

New Transports
~~~~~~~~~~~~~~

* ``django-kombu`` is now part of Kombu core.

    The Django message transport uses the Django ORM to store messages.

    It uses polling, with a default polling interval of 5 seconds.
    The polling interval can be increased or decreased by configuring the
    ``KOMBU_POLLING_INTERVAL`` Django setting, which is the polling
    interval in seconds as an int or a float.

    Note that shorter polling intervals can cause extreme strain on the
    database: if responsiveness is needed you should consider switching
    to a non-polling transport.

    To use it you must use transport alias ``"django"``, or as a URL:

    .. code-block:: text

        django://

    and then add ``kombu.transport.django`` to ``INSTALLED_APPS``, and
    run ``manage.py syncdb`` to create the necessary database tables.

    **Upgrading**

    If you have previously used ``django-kombu``, then the entry
    in ``INSTALLED_APPS`` must be changed from ``djkombu``
    to ``kombu.transport.django``:

    .. code-block:: python

        INSTALLED_APPS = (
            # …,
            'kombu.transport.django',
        )

    If you have previously used django-kombu, then there is no need
    to recreate the tables, as the old tables will be fully compatible
    with the new version.

* ``kombu-sqlalchemy`` is now part of Kombu core.

    This change requires no code changes given that the
    ``sqlalchemy`` transport alias is used.

.. _v200-news:

News
----

* :class:`kombu.mixins.ConsumerMixin` is a mixin class that lets you
  easily write consumer programs and threads.

  See :ref:`examples` and :ref:`guide-consumers`.
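    A minimal sketch of a consumer built on the mixin (the queue name
    and connection URL here are hypothetical):

    .. code-block:: python

        from kombu import Connection, Queue
        from kombu.mixins import ConsumerMixin

        class Worker(ConsumerMixin):

            def __init__(self, connection):
                self.connection = connection

            def get_consumers(self, Consumer, channel):
                # Consume from the 'tasks' queue, handling each
                # message with on_message.
                return [Consumer(queues=[Queue('tasks')],
                                 callbacks=[self.on_message])]

            def on_message(self, body, message):
                print('received: %r' % (body,))
                message.ack()

        Worker(Connection('amqp://guest:guest@localhost//')).run()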
* SQS Transport: Added support for SQS queue prefixes (Issue #84).

    The queue prefix can be set using the transport option
    ``queue_name_prefix``:

    .. code-block:: python

        Connection('SQS://', transport_options={
            'queue_name_prefix': 'myapp'})

    Contributed by Nitzan Miron.

* ``Producer.publish`` now supports automatic retry.

    Retry is enabled by the ``retry`` argument,
    and retry options are set by the ``retry_policy`` argument:

    .. code-block:: python

        exchange = Exchange('foo')
        producer.publish(message, exchange=exchange, retry=True,
                         declare=[exchange], retry_policy={
                            'interval_start': 1.0})

    See :meth:`~kombu.Connection.ensure`
    for a list of supported retry policy options.

* ``Producer.publish`` now supports a ``declare`` keyword argument.

    This is a list of entities (:class:`Exchange`, or :class:`Queue`)
    that should be declared before the message is published.

.. _v200-fixes:

Fixes
-----

* Redis transport: Timeout was multiplied by 1000 seconds when using
  ``select`` for event I/O (Issue #86).

.. _version-1.5.1:

1.5.1
=====
:release-date: 2011-11-30 01:00 P.M GMT
:release-by: Ask Solem

* Fixes issue with ``kombu.compat`` introduced in 1.5.0 (Issue #83).

* Adds the ability to disable content types in the serializer registry.

    Any message with a content type that is disabled will be refused.
    One example would be to disable the Pickle serializer:

        >>> from kombu.serialization import registry
        # by name
        >>> registry.disable('pickle')
        # or by mime-type.
        >>> registry.disable('application/x-python-serialize')

.. _version-1.5.0:

1.5.0
=====
:release-date: 2011-11-27 06:00 P.M GMT
:release-by: Ask Solem

* kombu.pools: Fixed a bug resulting in resources not being properly
  released.

    This was caused by the use of ``__hash__`` to distinguish them.

* Virtual transports: The dead-letter queue is now disabled by default.

    The dead-letter queue was enabled by default to help application
    authors, but now that Kombu is stable it has been disabled.
    There are, after all, many cases where messages should just be dropped
    when there are no queues to buffer them, and keeping them without
    supporting automatic cleanup is considered a resource leak
    rather than a feature.

    If desired, the dead-letter queue can still be enabled,
    by using the ``deadletter_queue`` transport option:

    .. code-block:: pycon

        >>> x = Connection('redis://',
        ...       transport_options={'deadletter_queue': 'ae.undeliver'})

    In addition, an :class:`UndeliverableWarning` is now emitted when
    the dead-letter queue is enabled and a message ends up there.

    Contributed by Ionel Maries Cristian.

* MongoDB transport now supports replica sets (Issue #81).

    Contributed by Ivan Metzlar.

* The ``Connection.ensure`` methods now accept a ``max_retries``
  value of 0 (a short sketch follows below).

    A value of 0 now means *do not retry*, which is distinct from
    :const:`None` which means *retry indefinitely*.

    Contributed by Dan McGee.

* SQS Transport: Now has a lowercase ``sqs`` alias, so that it can be
  used with broker URLs (Issue #82).

    Fix contributed by Hong Minhee.

* SQS Transport: Fixes KeyError on message acknowledgments (Issue #73).

    The SQS transport now uses UUIDs for delivery tags, rather than
    a counter.

    Fix contributed by Brian Bernstein.

* SQS Transport: Unicode related fixes (Issue #82).

    Fix contributed by Hong Minhee.

* Redis version check could crash because of improper handling of types
  (Issue #63).

* Fixed error with `Resource.force_close_all` when resources
  were not yet properly initialized (Issue #78).
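A minimal sketch of the ``max_retries=0`` behavior mentioned above
(the producer setup here is hypothetical):

.. code-block:: python

    from kombu import Connection, Producer

    connection = Connection('amqp://guest:guest@localhost//')
    producer = Producer(connection.default_channel)

    # Wrap publish so that connection errors are not retried at all;
    # max_retries=None would instead retry indefinitely.
    publish = connection.ensure(producer, producer.publish, max_retries=0)
    publish({'hello': 'world'}, routing_key='hello')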
.. _version-1.4.3:

1.4.3
=====
:release-date: 2011-10-27 10:00 P.M BST
:release-by: Ask Solem

* Fixes bug in ProducerPool where too many resources would be acquired.

.. _version-1.4.2:

1.4.2
=====
:release-date: 2011-10-26 05:00 P.M BST
:release-by: Ask Solem

* Eventio: Polling should ignore `errno.EINTR`.

* SQS: ``str.encode`` only started accepting keyword arguments
  in Python 2.7.

* simple_task_queue example didn't run correctly (Issue #72).

    Fix contributed by Stefan Eletzhofer.

* Empty messages raised an exception that could not be handled by
  `on_decode_error` (Issue #72).

    Fix contributed by Christophe Chauvet.

* CouchDB: Properly authenticate if user/password set (Issue #70).

    Fix contributed by Rafael Duran Castaneda.

* Connection.Consumer had the wrong signature.

    Fix contributed by Pavel Skvazh.

.. _version-1.4.1:

1.4.1
=====
:release-date: 2011-09-26 04:00 P.M BST
:release-by: Ask Solem

* 1.4.0 broke the producer pool, resulting in new connections being
  established for every acquire.

.. _version-1.4.0:

1.4.0
=====
:release-date: 2011-09-22 05:00 P.M BST
:release-by: Ask Solem

* Adds module :mod:`kombu.mixins`.

    This module contains a :class:`~kombu.mixins.ConsumerMixin` class
    that can be used to easily implement a message consumer
    thread that consumes messages from one or more
    :class:`kombu.Consumer` instances.

* New example: :ref:`task-queue-example`

    Using the ``ConsumerMixin``, default channels and
    the global connection pool to demonstrate new Kombu features.

* MongoDB transport did not work with MongoDB >= 2.0 (Issue #66).

    Fix contributed by James Turk.

* Redis-py version check did not account for beta identifiers
  in the version string.

    Fix contributed by David Ziegler.

* Producer and Consumer now accept a connection instance as the
  first argument.

    The connection's default channel will then be used.

    In addition, shortcut methods have been added to Connection:

    .. code-block:: pycon

        >>> connection.Producer(exchange)
        >>> connection.Consumer(queues=..., callbacks=...)

* Connection has acquired a ``connected`` attribute that
  can be used to check if the connection instance has established
  a connection.

* ``ConnectionPool.acquire_channel`` now returns the connection's
  default channel rather than establishing a new channel that
  must be manually handled.

* Added ``kombu.common.maybe_declare``.

    ``maybe_declare(entity)`` declares an entity if it has
    not previously been declared in the same process.

* :func:`kombu.compat.entry_to_queue` has been moved to :mod:`kombu.common`.

* New module :mod:`kombu.clocks` now contains an implementation of
  Lamport's logical clock.

.. _version-1.3.5:

1.3.5
=====
:release-date: 2011-09-16 06:00 P.M BST
:release-by: Ask Solem

* Python 3: AMQP_PROTOCOL_HEADER must be bytes, not str.

.. _version-1.3.4:

1.3.4
=====
:release-date: 2011-09-16 06:00 P.M BST
:release-by: Ask Solem

* Fixes syntax error in pools.reset.

.. _version-1.3.3:

1.3.3
=====
:release-date: 2011-09-15 02:00 P.M BST
:release-by: Ask Solem

* pools.reset did not support the after-forker arguments.

.. _version-1.3.2:

1.3.2
=====
:release-date: 2011-09-10 01:00 P.M BST
:release-by: Mher Movsisyan

* Broke Python 2.5 compatibility by importing ``parse_qsl``
  from ``urlparse``.

* Connection.default_channel is now closed when the connection is revived
  after connection failures.

* Pika: Channel now supports the ``connection.client`` attribute
  as required by the simple interface.

* pools.set_limit now raises an exception if the limit is lower
  than the previous limit.

* pools.set_limit no longer resets the pools.
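A small sketch of the ``set_limit`` behavior described above (the exact
exception type raised when lowering the limit is not specified here):

.. code-block:: pycon

    >>> from kombu import pools
    >>> pools.set_limit(200)   # raise the shared pool limit
    >>> pools.set_limit(10)    # lower than before: now raises an error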
.. _version-1.3.1:

1.3.1
=====
:release-date: 2011-10-07 03:00 P.M BST
:release-by: Ask Solem

* Last release broke after-fork pool reinitialization.

* Producer/Consumer now have a ``connection`` attribute,
  giving access to the :class:`Connection` of the instance.

* Pika: Channels now have access to the underlying
  :class:`Connection` instance using ``channel.connection.client``.

    This was previously required by the ``Simple`` classes and is now
    also required by :class:`Consumer` and :class:`Producer`.

* Connection.default_channel is now closed at object revival.

* Adds kombu.clocks.LamportClock.

* compat.entry_to_queue has been moved to new module :mod:`kombu.common`.

.. _version-1.3.0:

1.3.0
=====
:release-date: 2011-10-05 01:00 P.M BST
:release-by: Ask Solem

* Broker connection info can now be specified using URLs.

    The broker hostname can now be given as a URL instead,
    in the format:

    .. code-block:: text

        transport://user:password@hostname:port/virtual_host

    for example the default broker is expressed as:

    .. code-block:: pycon

        >>> Connection('amqp://guest:guest@localhost:5672//')

    The transport defaults to amqp and is not required;
    user, password, port and virtual_host are also not mandatory and
    will default to the corresponding transport's defaults.

    .. note::

        Note that the path component (virtual_host) always starts
        with a forward-slash.  This is necessary to distinguish between
        the virtual host '' (empty) and '/', which are both acceptable
        virtual host names.

        A virtual host of '/' becomes:

        .. code-block:: text

            amqp://guest:guest@localhost:5672//

        and a virtual host of '' (empty) becomes:

        .. code-block:: text

            amqp://guest:guest@localhost:5672/

        So the leading slash in the path component is **always required**.

* Now comes with default global connection and producer pools.

    To acquire a connection using the connection parameters
    from a :class:`Connection`:

    .. code-block:: pycon

        >>> from kombu import Connection, connections
        >>> connection = Connection('amqp://guest:guest@localhost//')
        >>> with connections[connection].acquire(block=True):
        ...     # do something with connection

    To acquire a producer using the connection parameters
    from a :class:`Connection`:

    .. code-block:: pycon

        >>> from kombu import Connection, producers
        >>> connection = Connection('amqp://guest:guest@localhost//')
        >>> with producers[connection].acquire(block=True):
        ...     producer.publish({'hello': 'world'}, exchange='hello')

    Acquiring a producer will in turn also acquire a connection
    from the associated pool in ``connections``, so the number
    of producers is bound to the same limit as the number of connections.

    The default limit of 100 connections per connection instance
    can be changed by doing:

    .. code-block:: pycon

        >>> from kombu import pools
        >>> pools.set_limit(10)

    The pool can also be forcefully closed by doing:

    .. code-block:: pycon

        >>> from kombu import pools
        >>> pools.reset()

* SQS Transport: Persistence using SimpleDB is now disabled by default,
  after reports of unstable SimpleDB connections leading to errors.

* :class:`Producer` can now be used as a context manager
  (see the sketch below).

* ``Producer.__exit__`` now properly calls ``release`` instead of close.

    The previous behavior would lead to a memory leak when using
    the :class:`kombu.pools.ProducerPool`.

* Now silences all exceptions from `import ctypes` to match behaviour
  of the standard Python uuid module, and avoid passing on MemoryError
  exceptions on SELinux-enabled systems (Issue #52 + Issue #53).

* ``amqp`` is now an alias to the ``amqplib`` transport.
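A minimal sketch of the context-manager support mentioned above
(the exchange name and message body here are hypothetical):

.. code-block:: python

    from kombu import Connection, Exchange, Producer

    connection = Connection('amqp://guest:guest@localhost//')
    channel = connection.channel()

    # __exit__ releases the producer (calling release, not close).
    with Producer(channel, exchange=Exchange('hello')) as producer:
        producer.publish({'hello': 'world'}, routing_key='hello')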
* ``kombu.syn.detect_environment`` now returns 'default', 'eventlet', or
  'gevent' depending on what monkey patches have been installed.

* Serialization registry has a new attribute ``type_to_name``, so it is
  possible to look up the serializer name by content type.

* The exchange argument to ``Producer.publish`` can now be
  an :class:`Exchange` instance.

* ``compat.Publisher`` now supports the ``channel`` keyword argument.

* Acking a message on some transports could lead to :exc:`KeyError`
  being raised (Issue #57).

* Connection pool: Connections are no longer instantiated when
  the pool is created, but instantiated as needed instead.

* Tests now pass on PyPy.

* ``Connection.as_uri`` now includes the password if the keyword
  argument ``include_password`` is set.

* Virtual transports now come with a default
  ``default_connection_params`` attribute.

.. _version-1.2.1:

1.2.1
=====
:release-date: 2011-07-29 12:52 P.M BST
:release-by: Ask Solem

* Now depends on amqplib >= 1.0.0.

* Redis: Now automatically deletes auto_delete queues at ``basic_cancel``.

* ``serialization.unregister`` added so it is possible to remove
  unwanted serializers.

* Fixes MemoryError while importing ctypes on SELinux (Issue #52).

* ``Connection.autoretry`` is a version of ``ensure`` that works
  with arbitrary functions (i.e. it does not need an associated
  object that implements the ``revive`` method).

    Example usage:

    .. code-block:: python

        channel = connection.channel()
        try:
            ret, channel = connection.autoretry(send_messages,
                                                channel=channel)
        finally:
            channel.close()

* ``ConnectionPool.acquire`` no longer force establishes the connection.

    The connection will be established as needed.

* ``Connection.ensure`` now supports an ``on_revive`` callback
  that is applied whenever the connection is re-established.

* ``Consumer.consuming_from(queue)`` returns True if the Consumer
  is consuming from ``queue``.

* ``Consumer.cancel_by_queue`` did not remove the queue from ``queues``.

* ``compat.ConsumerSet.add_queue_from_dict`` now automatically declares
  the queue if ``auto_declare`` is set.

.. _version-1.2.0:

1.2.0
=====
:release-date: 2011-07-15 12:00 P.M BST
:release-by: Ask Solem

* Virtual: Fixes cyclic reference in Channel.close (Issue #49).

* Producer.publish: Can now set additional properties using keyword
  arguments (Issue #48).

* Adds Queue.no_ack option to control the no_ack option
  for individual queues.

* Recent versions broke pylibrabbitmq support.

* SimpleQueue and SimpleBuffer can now be used as contexts.

* Test requirements specify PyYAML==3.09, as 3.10 dropped
  Python 2.4 support.

* Now properly reports default values in Connection.info/.as_uri.

.. _version-1.1.6:

1.1.6
=====
:release-date: 2011-06-13 04:00 P.M BST
:release-by: Ask Solem

* Redis: Fixes issue introduced in 1.1.4, where a redis connection
  failure could leave the consumer hanging forever.

* SQS: Now supports fanout messaging by using SimpleDB to store
  routing tables.

    This can be disabled by setting the `supports_fanout`
    transport option:

        >>> Connection(transport='SQS',
        ...            transport_options={'supports_fanout': False})

* SQS: Now properly deletes a message when a message is acked.

* SQS: Can now set the Amazon AWS region, by using the ``region``
  transport option.

* amqplib: Now uses `localhost` as default hostname
  instead of raising an error.

.. _version-1.1.5:

1.1.5
=====
:release-date: 2011-06-07 06:00 P.M BST
:release-by: Ask Solem

* Fixes compatibility with redis-py 2.4.4.
.. _version-1.1.4:

1.1.4
=====
:release-date: 2011-06-07 04:00 P.M BST
:release-by: Ask Solem

* Redis transport: Now requires redis-py version 2.4.4 or later.

* New Amazon SQS transport added.

    Usage:

        >>> conn = Connection(transport='SQS',
        ...                   userid=aws_access_key_id,
        ...                   password=aws_secret_access_key)

    The environment variables :envvar:`AWS_ACCESS_KEY_ID` and
    :envvar:`AWS_SECRET_ACCESS_KEY` are also supported.

* librabbitmq transport: Fixes default credentials support.

* amqplib transport: Now supports `login_method` for SSL auth.

    :class:`Connection` now supports the `login_method`
    keyword argument.

    The default `login_method` is ``AMQPLAIN``.

.. _version-1.1.3:

1.1.3
=====
:release-date: 2011-04-21 04:00 P.M CEST
:release-by: Ask Solem

* Redis: Consuming from multiple connections now works with Eventlet.

* Redis: Can now perform channel operations while the channel
  is in BRPOP/LISTEN mode (Issue #35).

    Also, the async BRPOP now times out after 1 second; this means
    that canceling consuming from a queue or starting consuming from
    additional queues has a latency of up to one second
    (BRPOP does not support subsecond timeouts).

* Virtual: Allow channel objects to be closed multiple times
  without error.

* amqplib: ``AttributeError`` has been added to the list of known
  connection related errors (:attr:`Connection.connection_errors`).

* amqplib: Now converts :exc:`SSLError` timeout errors to
  :exc:`socket.timeout` (http://bugs.python.org/issue10272).

* Ensures cyclic references are destroyed when the connection is closed.

.. _version-1.1.2:

1.1.2
=====
:release-date: 2011-04-06 04:00 P.M CEST
:release-by: Ask Solem

* Redis: Fixes serious issue where messages could be lost.

    The issue could happen if the message exceeded a certain number
    of kilobytes in size.

    It is recommended that all users of the Redis transport
    upgrade to this version, even if not currently experiencing
    any issues.

.. _version-1.1.1:

1.1.1
=====
:release-date: 2011-04-05 03:51 P.M CEST
:release-by: Ask Solem

* 1.1.0 started using ``Queue.LifoQueue``, which is only available
  in Python 2.6+ (Issue #33).  We now ship with our own LifoQueue.

.. _version-1.1.0:

1.1.0
=====
:release-date: 2011-04-05 01:05 P.M CEST
:release-by: Ask Solem

.. _v110-important:

Important Notes
---------------

* Virtual transports: Message body is now base64 encoded by default
  (Issue #27).

    This should solve problems sending binary data with virtual
    transports.

    Message compatibility is handled by adding a ``body_encoding``
    property, so messages sent by older versions are compatible
    with this release.  However, if you are accessing the messages
    directly rather than using Kombu, then you have to respect
    the ``body_encoding`` property.

    If you need to disable base64 encoding then you can do so
    via the transport options:

    .. code-block:: python

        Connection(transport='...',
                   transport_options={'body_encoding': None})

    **For transport authors**:

        You don't have to change anything in your custom transports,
        as this is handled automatically by the base class.

        If you want to use a different encoder you can do so by adding
        a key to ``Channel.codecs``.  Default encoding is specified
        by the ``Channel.body_encoding`` attribute.

        A new codec must provide two methods: ``encode(data)`` and
        ``decode(data)``.

* ConnectionPool/ChannelPool/Resource: Setting ``limit=None`` (or 0)
  now disables pool semantics, and will establish and close
  the resource whenever acquired or released.

* ConnectionPool/ChannelPool/Resource: Is now using a LIFO queue
  instead of the previous FIFO behavior.
    This means that the last resource released will be the one acquired
    next.  I.e. if only a single thread is using the pool, this means
    only a single connection will ever be used.

* Connection: Cloned connections did not inherit transport_options
  (``__copy__``).

* contrib/requirements is now located in the top directory
  of the distribution.

* MongoDB: Now supports authentication using the ``userid`` and
  ``password`` arguments to :class:`Connection` (Issue #30).

* Connection: Default authentication credentials are now delegated to
  the individual transports.

    This means that the ``userid`` and ``password`` arguments to
    Connection are no longer *guest/guest* by default.

    The amqplib and pika transports will still have the default
    credentials.

* :meth:`Consumer.__exit__` did not have the correct signature
  (Issue #32).

* Channel objects now have a ``channel_id`` attribute.

* MongoDB: Version sniffing broke with development versions of mongod
  (Issue #29).

* New environment variable :envvar:`KOMBU_LOG_CONNECTION` will now emit
  debug log messages for connection related actions.

    :envvar:`KOMBU_LOG_DEBUG` will also enable
    :envvar:`KOMBU_LOG_CONNECTION`.

.. _version-1.0.7:

1.0.7
=====
:release-date: 2011-03-28 05:45 P.M CEST
:release-by: Ask Solem

* Now depends on anyjson 0.3.1.

    cjson is no longer a recommended json implementation, and anyjson
    will now emit a deprecation warning if used.

* Please note that the Pika backend only works with version 0.5.2.

    The latest version (0.9.x) drastically changed the API, and it is
    not compatible yet.

* on_decode_error is now called for exceptions in message_to_python
  (Issue #24).

* Redis: did not respect QoS settings.

* Redis: Creating a connection now ensures the connection is established.

    This means ``Connection.ensure_connection`` works properly with
    Redis.

* The consumer_tag argument to ``Queue.consume`` can't be :const:`None`
  (Issue #21).

    A None value is now automatically converted to an empty string.
    An empty string will make the server generate a unique tag.

* Connection now supports a ``transport_options`` argument.

    This can be used to pass additional arguments to transports.

* Pika: ``drain_events`` raised :exc:`socket.timeout` even if no timeout
  was set (Issue #8).

.. _version-1.0.6:

1.0.6
=====
:release-date: 2011-03-22 04:00 P.M CET
:release-by: Ask Solem

* The ``delivery_mode`` aliases (persistent/transient) were not
  automatically converted to integer, and would cause a crash if
  using the amqplib transport.

* Redis: The redis-py :exc:`InvalidData` exception suddenly changed name
  to :exc:`DataError`.

* The :envvar:`KOMBU_LOG_DEBUG` environment variable can now be set
  to log all channel method calls.

    Support for the following environment variables has been added:

    * :envvar:`KOMBU_LOG_CHANNEL` will wrap channels in an object
      that logs every method call.

    * :envvar:`KOMBU_LOG_DEBUG` both enables channel logging and
      configures the root logger to emit messages to standard error.

    **Example Usage**:

    .. code-block:: console

        $ KOMBU_LOG_DEBUG=1 python
        >>> from kombu import Connection
        >>> conn = Connection()
        >>> channel = conn.channel()
        Start from server, version: 8.0, properties:
            {u'product': 'RabbitMQ',.............. }
        Open OK! known_hosts []
        using channel_id: 1
        Channel open
        >>> channel.queue_declare('myq', passive=True)
        [Kombu channel:1] queue_declare('myq', passive=True)
        (u'myq', 0, 1)

.. _version-1.0.5:

1.0.5
=====
:release-date: 2011-03-17 04:00 P.M CET
:release-by: Ask Solem

* Fixed memory leak when creating virtual channels.
    All virtual transports were affected (redis, mongodb, memory,
    django, sqlalchemy, couchdb, beanstalk).

* Virtual Transports: Fixed potential race condition when acking
  messages.

    If you have been affected by this, the error would show itself
    as an exception raised by the OrderedDict implementation
    (``object no longer exists``).

* MongoDB transport requires the ``findandmodify`` command, only
  available in MongoDB 1.3+, so it now raises an exception if
  connected to an incompatible server version.

* Virtual Transports: ``basic.cancel`` should not try to remove
  an unknown consumer tag.

.. _version-1.0.4:

1.0.4
=====
:release-date: 2011-02-28 04:00 P.M CET
:release-by: Ask Solem

* Added Transport.polling_interval.

    Used by django-kombu to increase the time to sleep between SELECTs
    when there are no messages in the queue.

    Users of django-kombu should upgrade to django-kombu v0.9.2.

.. _version-1.0.3:

1.0.3
=====
:release-date: 2011-02-12 04:00 P.M CET
:release-by: Ask Solem

* ConnectionPool: Re-connect if the amqplib connection is closed.

* Adds ``Queue.as_dict`` + ``Exchange.as_dict``.

* Copyright headers updated to include 2011.

.. _version-1.0.2:

1.0.2
=====
:release-date: 2011-01-31 10:45 P.M CET
:release-by: Ask Solem

* amqplib: Message properties were not set properly.

* Ghettoq backend names are now automatically translated to the
  new names.

.. _version-1.0.1:

1.0.1
=====
:release-date: 2011-01-28 12:00 P.M CET
:release-by: Ask Solem

* Redis: Now works with Linux (epoll).

.. _version-1.0.0:

1.0.0
=====
:release-date: 2011-01-27 12:00 P.M CET
:release-by: Ask Solem

* Initial release.

.. _version-0.1.0:

0.1.0
=====
:release-date: 2010-07-22 04:20 P.M CET
:release-by: Ask Solem

* Initial fork of carrot.
kombu-4.1.0/docs/0000755000175000017500000000000013134154263013511 5ustar omeromer00000000000000
kombu-4.1.0/docs/_ext/0000755000175000017500000000000013134154263014450 5ustar omeromer00000000000000
kombu-4.1.0/docs/_ext/.keep0000644000175000017500000000000013130603207015354 0ustar omeromer00000000000000
kombu-4.1.0/docs/conf.py0000644000175000017500000000161613134154177015020 0ustar omeromer00000000000000
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals

from sphinx_celery import conf

globals().update(conf.build_config(
    'kombu', __file__,
    project='Kombu',
    version_dev='4.2',
    version_stable='4.1',
    canonical_url='http://docs.kombu.me',
    webdomain='kombu.me',
    github_project='celery/kombu',
    author='Ask Solem & contributors',
    author_name='Ask Solem',
    copyright='2009-2016',
    publisher='Celery Project',
    html_logo='images/kombusmall.jpg',
    html_favicon='images/favicon.ico',
    html_prepend_sidebars=['sidebardonations.html'],
    extra_extensions=['sphinx.ext.napoleon'],
    apicheck_ignore_modules=[
        'kombu.entity',
        'kombu.messaging',
        'kombu.async.aws.ext',
        'kombu.async.aws.sqs.ext',
        'kombu.transport.qpid_patches',
        'kombu.utils',
        'kombu.transport.virtual.base',
    ],
))
kombu-4.1.0/docs/images/0000755000175000017500000000000013134154263014756 5ustar omeromer00000000000000
kombu-4.1.0/docs/images/kombusmall.jpg0000644000175000017500000006025213130603207017624 0ustar omeromer00000000000000
¤+nñªÿôídöZ³áõãùPŠ*Oß#^¢—Ëo áÖTFæÏòõ©Š¢Š(Š(  (¢€(¢Š¢Š(Š(  (¢€(¯™ÄWÜЉRGR΀¤‘F>4TVÄ'ª’>&´?># *zK(Íb€ê¢ ßÔö–‰Wx¡àÚJ¿å\Okàᘒê­©™ -£HVÎROq;Xç+Q_*=Ip™5–½´ -Ä©h Ÿy42(¯‰û"¾ÐQ@QEQEEPQ@W5ú´Ä¥ã”‘þ!Kuž´ÐÕk±Jm¶ÒéZyI8ã©#áÖ•Ò»Æã©qã;-Á‚–Z - ñR¶#wF¶$É·Ìj\7 RÐø,x¥CÄUÞ߮Р”Ì€êUâ¶”ŸÀœÒ©w ™Óâ3s6Õ¼ó]l)†ˆÀZRï„õXÀ<ÔäÙ§Lu•¡µì݃´ñR~ðÁê*±’“ÐÖxgr_ëÄbÎÖL%ƒÇÖGUû‰?•Vç_®³‰ÍövÏû¸ÉÛþcÍVn·+ÄY…‹6¹_†Ä«ÚZ”†Éû¹#95:ç®Ó»¾cEétÓ½¡Ñòóò£ijÿa=#«/mOœØú»ŒÏûÌÿ Þ/WDŽ.R½´ÿ V?:ú–rgk–XÉ{vÑÕރπ­¸ÜÇõ}µF'ÉvRG´Šê¼Ñ²á3µjÉѨï û7Å”á[ÛÕ·vÆ â»ê¶ÈýÄR•™úCý[µM-(ø P3þZì;´ŽêF€¾ ôf_rµ|9N Jœe³^h£áòÇxµâŸàl1­''úøq–< k)ýù®Öµ³GúÛ|€`¥_Æ•_u,&ʯ}œÝ’Øÿ}i–Ü´ÿ‡¯ç\Ší+IGPEÎEÞÔï‹sín Ã5&tÇcZÂÞ¡ï·)¿í7ü«KúÍ‘gƒ!~®çIÅö§ ›N¤ŠptÃP]QÏÊß]F»Èô®šÔ—¹ NR¥°!±ñS‹ÎMÎêÙËÈf«ò7ŲwOgüdÿÖõ­æò§ØCCõžy(ñP¤ûú?N<žî6Œ×ªtž7Ì ~uœ}jS¨[˜^dá>ñ¸]„çÌóÒ§Wú‚¿ÒeNš_6—Ü¿\µ˜µö–£²Æ)à¤Kï•ø7º¡[íOÉp}ý¨|mÖÕdqò5ªÙ¦&Â?ìÍ/¢¬Œ:ê=µô| ÝÍO9d»Îh·xÖ—GZ8Ë6öSTs7²EþÉþž1øæ—…¿E_Sû¶»u’ä Ñö›íÍ>èó(NÜ|0jéMxë‰ÖZ —жû*R’œùoÚ}Z?¡Zl¯¼•UÅÞäù‹pþDWRôΞq®év ovxÂZÚÄj=þÅæßà”¸e¼›ðIz²±#U^lá»G´1m‰-A¸÷Ûz‹±J¼7€r3çáåVË_uån>s¾ohhRw`…ô#Ÿ ¯\ímixލ!ˆ‡’C7;L…wžÌÀqóŒà‚9·öjã–kìý&J¤7n-ÜlÒûø+!E9ñÛž>~U0Ÿí{ÿ5O°¯A)Â\Ñ{:­V鮄(¯‰ä ûV0 (¢€(¢Š¢Š(Š(  (¢€ƒÕŠ[vä-“‡âJ¡È@L€¶†A%XÆ1žµwÖŠ)µ ¤áAÄic­5+f³OØäÝî«mJl(´ÌDŸ¼VxY<á øU“*Ñ]µ©A2ž{.›ZV¤¸£–ÐÒ‰*^>öì”ã°´^já*¦Úe¶\n2RÚPè>òRçD%Y$$ôÅXow‹n5¢R”縕4ÒIîÈ÷Ñï#À޵ hµITÕ¸—ìÑ.´ž# övTæxæ}åq\Î2Œ’‰ëÆX²B^ÓK]wîzwÿ;e®vHw—~ænM«ºJW ™e  øƒ·©çqZ¢i-7å›UÖ}Jtþf¥‘ˆ Û¿hß³ìîñÆyÆkbMk˺WàqLjÍòFM%Ñ:_CSP 2–m¶ä'¨HŠ‚â+5D„±…ÛmÊF#Ê·¡ R°0ÚVâˆJ=TxU¼ëë°©¸«]ÖPû‘¸i'ÕÃü¨žHcW7Eðb⸩òâNO»îÉÕXìï¦ÉkR€ˆŒŸÀT]ëNhø(Ý{·Ú äg p¶¼z%'?•./}¡ßg¥m7%6ØÊ㺆6“ñ_Ú?,Tt-#~ºÅzà͵çJKŠ~B¶€2q¸åGá\râÔ´Å >ƒè¹ñ%>+ˆä]ÍýËs·-nQ6{ž¤Œà9ÿf¼´£å»Ùµ0ÁJÕÀè&>Ö?.iH\ÊÕpG;¡´úõý¶mï~ZÒqµ>>žrÈÉ)¥“}dz›ôn“<œ’VÛzý‹ãÝ®ä,B°ó®é²>a ~úƒ“ªµ–µxÛ 8âšsƒv Û_—ÄÔŇLécÚ Ų́ïMn3)[ }óÀ}bHN7u>§ÝyçzÍ HCmÿö5=Ù#‚ ‰$Ø> s>•ÑË–kü“µuHò=§ÂÉ.½IÜ­¤žÏ[ûxŠýU¥ßÓ`ÅšóKö¦{Õ)¤û­H)ÄŒu®ÞÏí]¢ÙT”€ƒ'xNs€”š¸v‡j6Ê-æZ7;#ÙUÞ0Â07#¾VÔ08=im¤/(°jH7PÊdˆê;šÎ ’F=FkŸ,,ɯ‡CÛá8‰ñÜù¿ä¦«mî¼Ñép ©ÐúaXÚ¿ ç\‡qîcûf¸­“ Ýaµ6ÖúdEt{ªÆ“â•+´¶q’1^Ú—2´~o5&fk#ÕøšÅ\òy>µšÎR½]&"ÜWÕ€O’jÐäxqY]¢Ù $)Æ‹hýejuQ#ä9)¡Ôf›nN¬Ô ù³œcáZ¼ÛÎ_.`g ­s¯ jÔgÛ-¢áÀZ[\”²‚zï< zÔZ{ã%«Go²51—¡ÉH\y-)—|RE,­N9ûÙ]Íåj6]‚IðR2BA>™5aV¯ÔÍÝlLHÒQaƺÌLd:‰ˆh)*Ü Ià€H'‚¨75¦’µCZÕk™¨¤]›J¿Q„§+Ï\7þœ¾$×yÕÿ‚p}©¯W›=6‘„€:+*øžR }«œÁEPQ@QEQEEPÝbУ'¡SèF|²qI‰úš êר>“²´çôZîˆÑÕß(îY$G‘\ŽiÕ«Y„3õ–ÿ}y¥¢§ûW$ðu#Cüæ«98ŵÑpÐYrÆÙ´‹v·’Ä};srdÙ°øKN=h}ãÁ …²‰¢u úê]~5®ìQYÓŽ„¼·€à÷Ç•x¨ÎØdýD;xV2 ¥¤þÈf¾ée£KÙ,̘˕r¾<qX(oê“áÆy¬\ÛÊãuêzxø\k‚YeiÉÒ»Ùo³]–¦.¯Ùíq£ê…×P¾¥)˜Ò;Ç>êO$g+=k©Çõ:¡cé‹\gÁnxôÀÆƸµD‡´Ý²íu±ÀeË“„&D’2¤£ ^Qè8ñ5Dв¯÷\Ë­N“%¤s-N·°Ž· 'À œ“嚆¶û?š”á¸Ok‚|DyTc«æozÙ$ü¬×«/—W%ÁÔilàžÐPÊ3Ñ@=h³ÚbôhS­×™}H)»„¤ú…Áý“Gj“cÌÕ¨¥*Lá‡Fìçn}*.Ç¥_ºAvå%q­V¹zç,mlz uZ¾Á$ÞG¹©ŸM…ÆrN^Í5ѵMíTÓ,÷+žšÒ/¬X"µ"þNÔ÷*/4½ wì×Jo:ê#ºj]CÃcræ”(~ÃA%fªku‰ŽÎ ¤:=Åê ‹AO(ø÷-=MU.ºûN].2]¸Mpnö©+.8yÆx*îŽ&õ“®å¢>w7 傿õOWòOd1\²ØÛuN¯é »®žñkFØl’yÊSʱE¹åØnh—d‚cºR´Š÷¤Œa@žO—­\-Ȉ‡ÐÐ[«e *sÞ=….õÚÒ‹äö›Ó¶éO%Ò è¹8‡@àpŸ…G]¾F_ݸ‰§ÉÉ=m¥^ BËc¿µ¸ÒÆäBqJöÂRVs’¥y]|k¦~ª~ñ¡–Tbd•µŽé—íòáôÍ-¤]¦<Ú Êˆ‹ÜÃkÊ‹y9ÚUÕCãSеåÄZbdxŒ<ê£ȈÒR¦>ÐÇ™ó©Q_ èc.*åÎãªÛRè–®³­‰/¾ä†R0Ü7Lh£Ð41‘ñë[Uo—ôw´Üî"vðv[ã!ß>I¨-.†õŒ‡$.dÈÖ»IÊ‘1ýãp°3Àäš°YõFŸÖ\-ºväò¦¥xu’êåHÏQZ¬QNìÉñù¤«ätËrKL/Râ9J, ”•}ÛFN|ÍIƆìpPZןyKYÇò o,Ù§B›m)çk²TWÝ8µ¢Æ·G4óÎZ?D0#¾%n\I 8Wtw`ÖL(™¯2ã¬)ä )M”ä Š¥­‘È–ëôfž¢ŽéÀ+ܤí^áâOåRÊD]7Úu>©‰ *NNíOI ȧ­YÅ.¦*M–ÇU-»“lÆ\T8¤÷ϯ»Oáãð¨§~š}JÎÖ†pÑ>¹T›ÚÎrkKëìÎèû¯€†ÀõÚNM\ì·KFªµ?3NLLæ‡õñœ]kžŠGQñéY5gd2Â)iãý#Ñl¹HqÄIšêPÒ¶).¸¬ƒ×ž•öM¡˜Ì)àÿx¿wjHÂI'5!¡W ˜iksH.³Ç#Ÿ_'%2- Rpã%Ĩ©5NTu,²RJéiÓ¶Žh¶ØF[©yèîìA6•§sÓŠì´Ç‰rÑm­†dÇ(‘ )RP³ö€PO½Ž<9®È ­Ë‹CÛã³mp÷l¥œp>9­7´»ocPÆ·©,Mmæ…´s±åí¾895t«SÓ”•Iö?·fâß³%N°é}Nò”m˜(w»ßÞìNÖÂUÕ %_g¦­=´nz¿Wê Âì¶[pŒ ƒã’|*‘ÖO¸d¨ðñ]dÛù$’úØëQEhrQ@QEQEEPQ@@j®E¼yÊo÷×t¼V.ö.ÐÕ&Pêe=-Â8Km©Dêpν 
­V¤F†¤,>?#H®Ð,·6ÙjÕ§,ŽG»ÅL¸Î1õįœàŒ¦WPzYÕÀãçÏy*ÖÛJ«Çr…~¸9ºÌœ´ì\³î%\÷L§Ÿ.*g²×‘7]0ûëSŒÛ£gBŽxèxüMCÜlØ–eÛÔÒwÈqN£(Np‘·9¨KLùV¹ìN¶¾¦%5ö< kÍçx²)MwŸd°G‰á¥–Í*w^C³K‹œ(×Uê‡6ÖÜ[¾·Édç)?³Œ`uÍ/#ëIÑ슲Øb5·_Xeö{õ¡Dá x«Ÿµ×s¿_5;ÑáIuRJÖÌ8í„!kð÷GSñ®û­ÙŽÏíö…37[­;dÍÀ[6 ~â<æ:Ÿ èS–f”-Euë¯CË– _§ÂY8¤¥94ÔVÖºþM³薛sV5ôýÄ÷‘ì ¯†óÑrV:Urñ|¸j‹Š]Ô·%(Oú´`‘£þËmŽ>g𬕩N8ã«q×ÝQ[Ϻ­Î:¯5Ö»—´¸Þx óº!ÁTQáñ\fN&\Ù]ú/`âuЄûˆ9 øWr™qÛ?v0 H'‚+‡ /U×'¨ó5ÜÈÆ[E}Ú€<“ÔzVˆälyYBÛŸ<$E $ •".¨mWYo¶Œ0ò»ÜàJ¹Í>"\ìQ»7mÅÚ¤¶óvýˆR$grðFp}NiÚV°Úy/9„Ÿ>zÒ6’¡bÕ¿9*®nð=)$¡¶™HÙÇlj5ök‹© !]ÒHBBFJy«Ý«GÚt¥©IÚƒ¥ 't;Gs¯¨tߎ§8ã ñªÆK•KjM§ôuÔO-?Ò·CC7¸O–p~T¶Ðš€éM_n¼û;’Z‹¹Gm{ ¨) §>EKëýywÖòÃsHƒge[£[Y?VÞ:‘ö•ûª®Ür° ry­k°¥öŒ;‡mW禓Ñe‹<ÅuŽø¸<–áäü©¦u=¿]iÈÿG1 #‘‡²Ï·8}–ÏB… $øg¥yî=¢KÊh6Òž[ª ¶ÓcrÜQû©&ŸÝžéKOgöÅ;©]ö}Mz=Ó1[Ë« >¸*WAвOr­¢¡Úhi9r4¾ŒŒ-ÏD©%§†à±¬ý‘Ï*ë𤻪[Ò}å­×Ü;–ëŠ+Z™'šôÇj]Ž«T^ÛºÛ®L¸9@q’¶ž `+pèqò¤6»ÓèÝNåŽdØÓ$6Ê[‘ÒBS¸giÇOrbÖÅôÔ†Ÿ¾\4Õæ=âÎéjts|OŠ<+Y«¬íJm.ǹÉeKJ [i+ÉÀÏ$ãŸ*æì8‹¿eººÄê‰,-Å ÂBÓœÿˆ ¥Z˜rk‘ZcÞqdmçÇÿ Ï%ªhéÇžq"=äÑ'½ïJ›þ¬ „¥1ùõ®,¬=e›o–}§.… ƒT®¸$sRr ²…<´4v!?X±×cÖ«=GðãÝSlº÷°Õ u‡q„îÇ8'££/i9ªlÎeÊDCw²2m wg#ËêICy#¯>>´ÈìjÈlÚfË{»Öm¨S»ŽIuåVO®M(u\wÙ±êÈe@ÌîmZ} IÈRœRJñø~uèëœ[!/“ðBB…R=_áñ.¹bº%õ×îLÑEsœ(¢Š¢Š(Š(  (¢€(¢Š¹¬ CÈÏÖ(ÿ‘UçÏÜyv›$fœ^×.³$¼7¡œ¨}3á^‰× Rb1³íáÒœù÷J¯;öNÚ~+JQTîðY}y°Ó˱?Jû’úµ²îž¿r“iðÂÁÍ'PBYJ@‘ó8éO ãEË5ù R (𥦞1´åZÂðÊC'¹´Ã_þ—+ÜG꣩õ®+É’)v}ϤýŒ‡ Âdœº5óm*FËŒãÙõ¡´k{›[’zýCíÚ(tòÍ.#·Ü4 J”µœ©j9*'©'Ä×CïÊŸ2UÊêúŸ¸ËYq÷UÕJ>€tÇ¥jHäœá]QŠ‚QŽÇŸˆŸ‘ä›Õý;—qµ¤ŒŽG<íB~µM¥[Öàsƒé\МSKß”FÓÐú×\8ë_eÍêJ†ê*ç;7DúÎr´6rûU¶8 m*(NãÒµÆRÚœâ˜÷©>êü¼~¶"²ê„¥'ÝIîõ5Q»1¶¶œîwÜRŠ NÏ&—o•…—[BМî_tøcү׷ãžÎ G.”KJÔ„–Ö76wsðâ–ËL‚µ(¼µ(ƒš¤ªì•tfÝÁpäǘ¹ØÏ"@JO ÚrF¦jýÛ…¹ÍSm¶öa|ͳ˜ýÌ–ÓɈJ‰$,œ3ÐÕ,ÉS}éX-îÚIH©í¬&èyRÒÔvîY£-îð‡à©'î«|9«B¶++Ü^$œgp)óSJÕ?Sj8;c¬G~Z”ãÊÂÉ'ž<‰½+Nk Eé¶ Ÿc{rÙ ÚV‚0 Ÿ5w£"íXùÑöˆzrâý£DG]ûV³õ3¯s’Q?t'®qŸq<ŸYjMP­.™ñlm*ÿ©öâUæbZB¿Q)O€Oº>öNj™®ûX> ÖÍ+v;sÛ•"B” §ŠºŒ§„БÉóªFŸ½;¿dqGÙ‰ÊA$í56Vº’wÝW~Õ:e躊\‡ße¶ԟªJGêíN)|⻦ֱœÔœ“à95èSl²Ý{5½î·ô£Œ•G(ñu”“ÐgÖ‘¶ ¼^-PGx¹ršohþÐ*üªÉ.…¢z“F²= ¬¬¾v" ¨Étþ©R Íy9/—]ç.­n~*&½Ûî§›e‰ôº™‰sB›7%/!m áL¶<1’yÁâ)l÷h tT˰ˆöŽÑ‘â/²>ïuqæLá\ÕRß-¶ D¥8ÖÅ©;Ð2œ…tâ­_£ƒ&4irs†c “äv“TÛ|ný¤©;v<ò‚2xQ*Ï€5–M‘¤7c™6”H¶[Ÿ•=,”>Rò=íØà`ô® ½­m÷VÙÙ)Æ'4ó±¬%Y)8Ààd~Ø<ÓvæÌëtHZÖœmIÚäþÛ6&Q¨KD‰ ´JÐ7u8#¦qWz¥¨ƒ¦ô9¦¤\.–lŸvé­Ré ã-ÇkÀ¦½`Iýå J–âÖw g*<ÒÝ=™:ó³›d`™2ã¦|ÇŸÛ€µ””tÜx>1O&µ¹fmn)JZ–²Jºý£QH¶YóÊÒÓ𨚢Š*JQ@QEQEEPQ@VµŽ°¤ôQtýJ¯6v4¿öhõFöÀø8¬×¤õpÝ"ØŸ5;ÿºUyŸ²}ŽÐ¤ %Çg—ò âQx~ïéþ†±Ó!rã­{ürÚ×ú©#“øR'´ÛƒRu VXÜ‹M¾2X·¶x y´í½F\¨Ž°Òûµ8”¥JýŸQvÏ ˜×›Cqѵ6=N^[Ц>~5ó¦qëçZTòQ¸óއo5¹ BÓ‚wxñüªŒIûÄ„ü*Ãmu–[’Ĥ¤!Ð9O$1U¤­ s('›’ù+JÏz¤ ãt«>¡ 2C-íZο=ãR*mÄ–œn y „ŽúŒœ¯‚ÒAÆK"Z]Cm¥ J HÜ@ʇÇÊ¥Ìf%¥Ê}žð6§\eI÷T1Ñ&¸ÔÛ >”‚°eDŽ˜ÅNµoqŒ–Ôó¬¸TPÂG‘&ûÅ)%×î,g€ y ùµY*vZ:èc467·Ð…Ïüë¿Ø R•TÚ®H®v¡ãbÒòzÁJ¾9©46ØRXiǤIW(m–É'åÖ¡.¨2ÀÕ£ý vTÞ€ú]ÕZmõ;3ŠÂÞhœíI=FÞ¨¥$h2-ó_Mʨ¯G8SO0 ¤«Ë¥Z–óMÍŽóO¿ô«*; ¶•ä]éñÕêGizÒvc7p‡%ä}ÙLeCÈÏŠÑIu3¦´5¨u+6KÂ^ãýotZh|V¬L­3Ø»1ŸeZÖ{OHXÞ›D~Ðó[HþÏãQ—еµÑ%¹¦¢´x"#*ù)Y#åRÝŸ±4f¸Ö<ûÓТ³)ç ŽghÁÜr~×î©M6t[{Ka‹?eh0b5o€âša(ŒA<óÔôç'4©ì&Ω} ½pØ=ŽÎÊžWw«Høg?*ŒìÎâ©2Ò7‰/›¢é ÜX‘öòñÈÁ9§S–Øžöm{6•¯ÚaÇ\…Éy{Ò2•¨tã8 éÅ[âÔ®ÚöcV¤Ð8Œ6^Ÿ ÐÂyWxƒïñNxñ W™!7*lÖâÛbJ‘1G a¦T¥ç˦l×õM©ØîjpgÆÆådz[“.Ñqż¼£ p<\¤º hhlVààP8Á óÇ•a9Z7„iÐûyˆ6ؽܩB °­ÉÎþ¸ðùV¦¤%Ë…Î<’d5…6·IJP!$¨sá׫ºfí÷§Zzûskéhîmx­HinÿV[OˆÇ\dÕ®3M¦ã-hmJa Tz©#8üÍijJÑJqtÎMƒeµË…ºðó¬¤,w˜Üv ðzsZÇ@ô'Ö¨ÍÚ—‚ˆq'Ã++¾RD8î¥È9YIÁVGZ€IÞê7(‘Œç=*}‡’†[BÝWv¤`í®xõó©*iˆë~Ð’±¸~®qS‰âR £œ‘ïgŠ‚•%*x„Ép‘•1ÐçRñœDmÁ»‘ƒ€ŸJ†ê-’•´‰ kY-¢cÍ4Ú¸‚¼ŸšR ˾ \·Ü>!¸Ø|T¯áXÊDwZnc%m¥âR¶Ï$,zùç#žsãü*‰"Öw¢c-¡A˜«%]Të¸ä‘ükRäËy;Yp²Ñà¢*{´Ÿ‰šÔËEÂ|è3L ¦ó_HÞÝÀa%A?yÏAüêéYFè×ÙÆ‹Y¦MÅA›|P\tïÙž3Ç Ç$ÕŠU‡Ië[ÅzP~”‚­‰‘zBÕ×c›þÒUÈ ó¢™».×R%ÇL6ð¶™i*†‚z“ëI ©—£/nq.G ÌŒŽ=¡“×ûè5®‰êÎçˬ¥ÏjȶICˆtí(Xê“ëWdûVÑÒÍ´©Þ$%KÎA8qED޼ñSšƒ_vuä«ÜV¨/o6½Ìr @èqK÷R :á$úÒŸ´-yu×.ÆMÁˆ°àDR•$q¤ã%K<¨ñð¨ÑlN¬„°Ü·ê+Té!~Ï[o9ݧ*Ú•p“”…/ :ñH[_yß©-·½°‚\m= äJdv ´¸ö§Ú-5׌+ŠŒrj\¤å®c{ÍH±é[œ'£¶ÜÛjâÈqhIÚóÈPÚ´¨ý 
AÐñ^œÒ8æ¡')D•¤~Uæ+/}7³ÂÃÓ•>:ã­Ý„. ‰t’ÁPäç‚äs^ìêGn{?h÷n+â¦ÒOçšÝH¶ÑE%BŠ(  (¢€(¢Š¢Š(Š( *š¼âål>H}_äç^Á]²îær‡†<‰]z[/døjýXÒUþQ^výûíÏõÅO†üT_¼1Æã7Ø¿.Q#aéŠ\öÒÁwDwˆ³%'9èÅœBHð&©½¥7ÞèÀا±+#‘ƒ×áZ3 <ôæÞ äŒyÐŒ­A2|}êÒ0V9¾µ°Ûk)(g­v‡T«ovãª÷T;±ŸÔJ–Aö ’ÓÉ_¡À#Öºu–CªSŠÎ:€´2Ò$¤ „:0”¥#àôõ®[˜qéCA #HQ‘Ôš¬» G{$ ¾™,¿Œ—rh-@r:ãâ<+c rCn-¶ÔPßõ…#!?*…ögŒëÄ£Â|A©Û3ßJ[¾Š—•Þ-°´ >Ÿœã yUh’JÄØT€TžjÝ>ÿ+ÙDt¬ñŒcŠ­i¹VÆ®B<¤»qœ•aqØ!,µ5ýãè8© ªR$¨†Ë;Ž{²s°ƒŠÕ=43k]KN—+Ù¦¿Ž²?¨qi'û¼öË…L4sÕ"ºüÖž¸Ê7D,³Úîf… Û #Å<ò5 {Ò×FýªÅy»Å†âK¨÷P¶<¹´¥>ª«-QW£¾ñ㯥}F ^Å‹BDÕ°­j›½Ú Ž_•"iµÞI*à”eÛ]†Ó¦5t eŠ a0 %×@uN)Å•«ÞRSJ&ú]7dŸ©/‘­vÙ\é”wÎÐI$úSNv/gŽõ=ÞEÁÕ$ŽêKL¶Hàï>ò°|±KþÅe®ÔmnÊ}†i‡Ö§_u-¡?V¬eDN[‡i:FÒU‹¨¹Km*)oiOex÷r¼œxÔª"WÐN+²ÝCê»t¹š~"›^Þú]Õ¦÷¤ý•÷{·r9Æ3NTè¹–~ÉeikO’óJ(”¥4ó«P*ÁèzחpŸ6uÉ rl‡–óªW$•þYð«ObÓçCí"ÃÛ%ÖY•#»}²[q¼ÁI´¢ŒÛ’¾v´¥”†Ò1ŸùSS°æÃRuP¨î™è=I¬ãñ"ó~ë-Z£[®3!XÝr4A½—mu’¦œYIØó.þ¶~Ðñ¦ÿdOûE½$8„P1K‚æë´¶½¦JŠiF2‘†[ݸ…¡^*V9Uã±æÊâA)UÑÔÃöØÍ¢Š*J…Q@QEQEEPQ@Q»CYC…Cª-²ÕþA^|ì XƒÝô ¶nøžôÓçµg»ˆWGVìÓ?Â)Øû­:ÒrDÛ–~=âÏðªIÓŠ;8X§‹ÿüµÒð[éš¾CúBÏ.qH+m\§©ÀÎ¥MÈP(kÔŠáW2_)?1ŠÜó‘æÁ‚ØpAèAÅiÇ&¥.ñÌ[”Æ0[}iÀøÖ‹\M”–ýô5ÕÇÅ#Ï?*Åš£–É9$¥=|óé]+ZQBã- #z”ãCjÏ ê+Ùr$i t()G 'v©PðÍpÆÝ%ÿdoÞhû«PägÈ †ìµ[§»9å--œ[ P>ŠÆp+ºûØ­8ú•’¥ûØù×VŠÓ— ª%FµE/ËS¥>·;¶™htÞ®€Ÿ.µ-¨´6¤´ÉŠÄ8H»ƒkOÛ·)(WŠVŸÅ)½Q–Œ®\©rT·9á c>œt®)PÐú{ ÓMý´­ECà:ÓFËÙœ[-¿éNÐïàÆ9&3oàBï;£`ŸZêM˱é.¢j3 {¢A!”Ÿ‹»²~$UÔR®k¡GÒëm ßuÞ™‘—ÊNÒà¤çƒê Y•1>Ö§&0ñP*aJ '¦•ŽMgÚeYƒvrTØÏ)=ä$¯¾_v~ûn¶"¢{'·¹#µëWÜu©il«)Ü€1áR¢Ó¢-5cRÑdµZµHÕÉ·F¼Lw`­dÞ “•|NÐ}j¶Ûœ»#OÍ–X‚¸èã^Ô–ƒë$ý¼ò 1ÀÎ);Ú½âå+µ ̧ž_´Ã—ÝÆ=;´6}À<¸úÓS´Hlv¥Ù<Wkl½± ïÙO'ŽõÌŽ=3Wî*ú6Q#[˜ƒ.-Ö4FåDˆê^/¥Æ–RxIÁÆqЊæ×7éú·Q¹v»GBfÒÂZŠØCh@èTIêy&— …nJš$+ªJNü*nÚ›åÕôÈ™SÞB¹œq¤¤w®” tI÷@øMM%ØÞ£œÚ~™ËrÙe!×Ò:åX÷ñR…YÙ—ÙŽ…t®:‚öÙám¤Lp«þÑ_RîƒJíØ!PÓIJ·¡'vñÉÇ•_ÿGø•Ú¼)?ôH¯H'È„ðOãWåöW¨Ü3®šnûk¸¹ïºÜHËPW®Z!8ù Æ^¾Óz*Ý"6†ÒWÎÉÛƒJaµ0IÊ•Œð’ *)nM·¡PÕã^ê‹`)^ÙÜ3»Þ:TÕÎþcËFvÎ<Ǻ=ì|ë­”!×Vé*%KqÜà)G’~f¹A(ŠÿŽöÊA>¦¹œ¹Û:T9A…pÓ~‘ÇB|i©ØÊ€“¨Ÿx0Òó¥ŒTºvä¥ ãŒÓ#³'Óm’$HPDiåQÊÏA·ì“隬~4Lµ‹ˆ’• é¾å¦ú<´A¸ræþŠ Èã¬ý…9¹Pâ€ø¬TnZ'>]’•BR‹ÝaM,}¥oûÁ^¤û •¾ûwŒâÇÀ/oﮇº2Küm÷¯¸ê¢Š*ÆaEPQ@QEQEEP .ÚU¶Ã{9Æ,’¿p¥„—;0uë+­«ä¥ŸãMžÛ¿ú¹~ÿôI?•º+ Ù±Æ;« Î˜Š¬•׊õ:8YrÊWÙ/FZW¿>¹ü+êXuâ•´Œàx‘ø’jûs#¤çì(‘éšFë Ü‹íêà©js¸Œáf4p²”´qœ¤ãš¼¥ÊrÆ<Æíz`'U\Œw“)%@’ҽīŒøœùUM÷Ö… xõ~Ò˜² PeÄ4•¥”á#>^fµ¼àÞ¤!$¶8NOçŠË}M£‘·\RVµ­*+êO*?ÈVëLu\nðm¨Z©rczx )@=q_Ùî÷C=ìŒT¾„k¾×zm°97qþ*´V©ö-ý¦kézJõ+GhÆcÚâ[@eÉil-寒’xIç¯\皇³öÁ¨Ø±±i áZ’ÝÅýϼ´‰î€Â”<Õš©ö'ÚûHÔïõß=ÏËŠëÑÚúå¤,’âéøbÜ$ºTåÝh }Æ6#<$zÖ–gZ—Mª®KîÑ.ñ,:3í7™ŸR|›a9?€)}zö—)MÚæ;>U†$8Ïr§Æ9%;Fjj×fÔúöêäˆPî7ÙŠ9rS™RSý§î¤T§³¿c¾;o—*© ¥%ÅÃ|:ÚIÛ¸pHèqPJ£ ~D;†SŠ"ׯý„: Vä2sPš>`²ö—‘Œ§ãË-ï*mkÈ „ò8ô®oÑ¢<©žôôd-hn )Î߇CÖ˜©èk}îÚæ¢°)^Ú´™ 0d…4û‰þ·pIÉ<â´‰IhÊ'nZF4nÑ¥H~[P˜ž„<{äAidž~ue’eé+ÓËÓWHWÈo«lËaËKy}¸ÜúÓðÑï²¼´” vå>ñ†HIìo Å-Eظ¹qL8- ˆeJÙ¬•©G#r¸Ü¯@xÈæ•ÖšNH½*¨VظR¹C¼)Î7)¬“°øçΘûEК5·[Ó¬¢á4 Û›)I?·!cv?²>uÙÙ~¹k´ß-×Hq£LîÉLv²Rìe (¢IRO$ÔÃnÈ”íh´šöµ¤Û.ö׊™m0‚JXq>ëÍŒø`‰ªªÇú¹NIïHdž«æ”·HºYõ ¡ë•ŠBçÚR®®ÿ\Ö|w$äU6RW;JPa@ºR®¨Qðó¬&©ø•¯tP”9¸‚¤ƒæ)©i±­í!hm Ò”)ÝŸt…(œ~­ŒVæÔ6€¢µÒ‘æOJt°Ìõ2#˜ìGa m é u >]i\™%IZi‡Z´Fj@Pq.(©Y)®ÞÖ[ÖW&ñc$gÍJuÕŸÈ ÂÖrÒœýb€>|âŽÇœ?érxIàXBñVº*D+p“ï_qùEUŒ‚Š(  (¢€(¢Š¢Š(Š( ݸ¬'Jêž©³¾ŸŽiY£r„h´žˆÓšÿçLŸÒ e½ ¨Ô?õj“ø)m¢T\f2ÝNÕ±ŒÒsÑ+@RÇ5uâpþ÷؟֗ܰµŸ¤¹÷P‘ùÒ+VÇL=]ya¼)Òqý®=›ÿ¦:xÀ(𤎹BS­.ŠÝÏx20x÷G59†0z•G’†’À˜û‘Û?VÚÉ)áY¡ðèH;»óÀ€AõòSkmñ½8 Äç¤ùÖ©ÇTêv îÝõ`$$ú ªV‹6fµ  ¸Ž¾•mìŽ8´k ×Âxº2z8ªvãÉ<«õ©‰ØÜ6“:娦É1àéö‹«R’¥,yxŽ•h-JÉè.µÓjF¹ÔM¬aižîGβӺŠ×a†¥½¦¡]nåÍÌÉžê–ËIðÃ#G>*Èô©žÑfiÝFfj{ Æ—Jf[g/ë^YèóXà¤õRxÁô¨_¦´ôK ¢B°™×gÚ)zå=ÆIÿ©e'¬¢O «¶1Ôý¡ê}IE¹]\M½?fTˆñÒ<¶ ~u# ³Éqmq®ú®\]?i€ã ’{ÉRFAi„ÊÎzœZ¢'ËíÇ5Øô™2”ß´¾óêm–Ë«+ØÑ)Ï@<ª -JÕ/¢Æý‚À]·XÖ­ï œY厠ˆ^M3F›ÜÄͺé©i&”‰i{nD7ÏüÐO*´6™¹ê›Ÿ°Y£©Ç“‚ä…Œ1yÇx­z3L[-ÐteÆÕÙõÎÙm¥M®rW¸»(ŽTàðÈO‡JE7+iFŠÅÃL]t®úN_|’KmžvÈøT_iú:&·¶=¬ôR{ÉÍŒ\íàa¤õ;|Q÷‡#<Õ׳Cý§Ó£µÞD¶¦áÈp̨»QëÔ`¥j]´®\«bÜuoÄ:rÔÆü–‘ÔãÔV—jÌꘃ'§8Ï›:[Z5]’=‡´u¾‡bû°/ì'|ˆãÁ÷ˆðçŸßVw¬:+µ…9&Ðâ´þ¬XÜü"2±ÔìãzOš=ï0iS«´¢Ò 
.\¡)p³„ÌŽKŒ«âGÙ>ŠÁªÓZ–æOFz O黇Bj—aÞâßo3­n0•°ÏvdyA=O¼Íxˆ„@µÊ@AK­©¥¼•û±V¾Íµ+Öë¤v”êÌgIii‚Áüj6ô€Ý–LD2ÙD9Ηƒ»jñ°ƒä0‰ì™0Ý¢K£…ìúÑî¸GäqW¯ÑÝ©v¿dÒ¢”%Ò÷ob³ŸJ¢Ù`̸ܙ…mŒìÉoû‰a¡•/ù|M?aÉØ×g²§Hu—5…á²ËE‚xÚØGU+¡Pt¨Š&oB‡wÕÙ;b¹ê+‚Þ‹?rä8”¤%i?¸T®¿…jz³ªÃ~Ao^1±gúÖ‘JãÈÒÞ3n) S.•:Ð*Qåe]TOΙúnu¾éÙlÍ5=皟n“ô•½þè­´ ûéZÀÂ9äÕiNÑe' g&‹í:¾Ë¥%àua==ÐI~âdîXÏi}ÙíµÄjçß’ÉiøQHq³÷V²9ñÈéL@¤U1&–¥¦ÓzY•¹M¤ø<¤ŸÇ5ÉÙ,”ÿ¤8#tÖ¥ÉÇì÷îsø[-kîgJ èÜ¿’†j+³ÎÄíṘ{ˆ2"$ùáü¨ÿ˜Ñ‘ÿŠ^+Ñžœ¢Eh`QEQEEPQ@QE¥ý ‘Þh]HŸmQÿ0¥®—_»jÇí9Åz”„¦š»'~Ô‰ÿðµŸÀŠTiUezxùif¿ã{¯k‡÷ø?±hdýk‡ÍiýÔ“í)N·º~¯|¼~è§KggöÇ9¤wj5¥á{êRK<8ép6޼šè“Ù¶aD&ƹ~4†œ õ+5±®ÊµhÎÏÔì"ÉeŽ7H—%Ô‚R„’¥,ôMM1k´ß¯tŸ§mZÚÉnj-¢âÎeFŠ’Q 䇂IQÁøœU RY[žœONÈõüû¾®‰¦!Č֗n­7Æ’µ%´ ’¥«ï•Ç8#«»¶\Ë’4Ô¤Ú^J¡¾ ã“û ò~Š·-êŠóVŒIE¸Ü„Ú“r’‹c«/.3kÚÚÕæ >×Θÿ£L9c\ÍZ·ÅˆDµŽNr}p ùûjìNùÞ¥©·kÖiÃpj>¸Ó…½Èzí måœ{BÊN~ kÒ½—ß ìÖ9-»$‰pp½œuH_ºæ<…rZ‘¢õm=9I”…¤½bPd³âÞ>êÓÎEZÊΚ÷E…þÜäy[^JšÉú§P­‹l%u«.Ÿí“PYZµ VïÐÊv—”­ù(à…ÿx\uÃQW}‡\ÎR®ïr²=i=qS¬:¤¶”¡Yå;r|€©’åÕ§£/í߻Ԯ÷·+\« ÓÉq(ZSŸ‹Gü5foMvTô†\rO¶»9’¦;ÉUßrH#8Ç!—‰†Õ­ë=ÌéW}/xíÖ¶BûÆà¸m+O)B¾ðÍWšÓ'–š-#´M;deØ}›éÖ”âÆÓ1ö{†GÄ\sàT¡ªµMÏP©‰RLû‹HÙ!n”§ì!„¤8u %×í÷)‘J¨Ï-•%'n zÔæ˜y³f^XyÆßê@Q}}?™>B²sm£U‘UˆóÅ{… ­%!mcj‡‘ô«÷ez™«>¥'¨}= ‰ph§rKKN ?Ò¡53m7~‘*$HСºAiˆÁA « r>¢-©Zf£c‰úÅSÔ ”éÙVJÙì’ìó.Hœù–—ÃMÅ–£¸ºÊR 9ñÀ"¤J}ÐOZãìÒy»vk4•:ìÈ®ºX*CÝüúWkï-´‡ÂPÙ!;t ãÖ•jÑ’u¡Â¿ªº¾G[BÄqü+«²H>ÛÚ]Æa‘¼ =v¨Uÿ?:‹¼N0"Ê™!(*ŒÚÔB Â’‘ÀçÄÕï°«Bà[œTú`l>®¸wŸÁ;ʳ­MÔš‹]£bŠ(« (¢€(¢Š¢Š(Š(  (¢€Vöâù+¨ÿÕ.ÒL,‡4OÝ4ÚOÀ/þTáí©Í;¨O%VwÏà3I®ùí ¡Õuÿ¼Pþ{¯k‡iø?±sc®úGö®Æí4v-–É>¸ëO²?XþêKvÀTδR‡ûÈ­© úje±„w)©KhcÛŽ‡ÎµÆ}/ºFI<óÅr¨¨ï Q'¯\ããXGÚâOAUEÍëV÷C‹ ïãÓÃÿ…a×bÜRä9Dt/sO%ÞíM«Ïpé[â´ûòÚ‡µH}åXií©]|ißoÒzG³Kk-jüYwµGxŽø!cªc¢ˆ<¯ŒŽ*Ê6UÊŽnΧö‡~»Ánò_°…€ü×¢5‡Gê6µ$)dôÈüj¥ú@ë$\îéÓv¥ƒl¶¸Túrzó䟳0k¯Uöët˜§Óml ¶%È!Ù#hë|y:L¨’¢I*$䨜’|I«7¥"©klköp±X¥ßï:‚ë“F`:¥¬¯í”¥ “ÀüêõvíŠÎ‡MZdÝ\H'¿šàŠÏN¡ÞWÀàÒ;I黾¢¹°Û$Ï?êÜÅG~5e{L¢Üü»EÕB=ê+ˆl´ qx=GÚ8=Z=…d•Û>]{UÖgœô¢­±Üîm­ÿçyùš¦Îßh‘ ¼ûÊûï(¬çâkб;Ó¦m¹\n÷I);UÝ©['Уùר=ŠÙÜ-K1}*Ú¦÷Ëà#À€+9EÞæ±š­…¶¨³õí;ܾÒÁiÔ¯j¡È úSóW\%êNÄ,Z–~Ø÷æd%L:2₈*Ãv~¹¤jnÊae4«’Sá¶Øæã¹¨-c­ª¤Û™Vû¹cF^Ð¥¨õR‚}ÔÐ$Vr’Šm2êM&¨²éëÂçZ}L²ôibLup"ÈHÇ‚¾Ð?/ «jÍ7íH‘*Þþç’µ #H=V“áP¶y‹´\ÔÛªpAyA/¥³ïcjÒ:dUÎÙseÈE;›\|,6à>#ÏŒ§àx­ñÍIja’2´'Ê›%³y g>µ?¥£°åÑ Ö¨…´¬ ã/ ÃS÷Í\˜¨ÚÑ;U± (zã¨ËtåÛçÇ–Î ˜X^<LjùŒÖWË#Jæ‰Ý©ÛOô–bÒ=×Cn(øn(¿Íš¶vC@º\nñ¢&r,ˆnSí¨$¤·Ÿ|sÁ$gÆ«zŒ´åÙŶ¼2êZBT保ý›ÎzÉ¢zJ.lI%8!°S“ÇRxªÁ\©ô/7Q²—}ù‘¯²d{ÓeOT…Jä)ÅWíÎ}b*dç§[¯)ö{œ¾ð6”î-©*ç>>c‘UXHîU,mÇt0JGà`|ê:;tz³%»lÒv ¶¤?”©ì’H 9*<üEY®ýÒ–òd˜¨Ž“Þ•÷Š^Õu~ú­Zí©g¶1!§ˆm!À·Þ J @<„äñåV:Ù´9$êeÄ/%¦TçyŸ £ü+¡*‰ÏvÊíÅ¿¦.6ø‹ØË‰“$x-¶°¤¥>{ÈÁÅ<û;·®›aéM÷sfÿ¬¼Á¾ÊHóJv¤úƒIm([¶j˜¸%Ù0›Z&!!#vÒ:$ t  ô5¶t[œ&¥À}Æte+AÈ?ÿ¼«$lÎÊ(¢¤€¢Š(Š(  (¢€(¢Š¢Š(om²“ Nj û(³>Ÿš½ÑùšRöu¢ ½Õä)›$V0GB¥)Gò"˜_¤ÛÝÆ¾`eoÆf:ñ*y5^Ó1»ˆ/7Œw.¦?Á §þu+tZ:Bo»îŽ—S– èI^•íŽ1¨­`,¬½ ‚I'ƒŒdò~tñHC§a^PBzšNvÀ¥ÈwO8ãE™A§XÁA9ÍL¶2‹Õ $´µ<@Á>œæ¶IÚˆ@ç*WκR”2=ä•9ÁÁÈâJ—î¨,ƒ€ pùøUæ…—³Å¡=¥iRñ hÌ þñàVßÒçžífìË¡_R”2Ðääz|I5^~C°æÆv Û"*’óJhp‡â½Ù§hlëMD¦¤Y"@¾3Ú(;ßà{¤Àdç§5¢×C7£³Ên²ã)§Ûq§SÂâJT> ô«e:\jíyl¶:œÂJ½¢b‰ÀK(åYøã:á×°.¶ýkuF£J“=××!kYÈq*;‚’|AŠnvmVˆìwRjÇP[»Ü£m¸rÛ ;qû[³òJÄ¥¡Cí/X.ùªg;g&-® ¡‹lx¿TÛ-¡\©)NTyÍuöƒqö>ÕgNL°Ú”ôG^Ãd©#ºoy<|zUÈ•ñ¥6­¼©'γ¼J\‰.?*K²$¸AqçU¹JÀÀÉô¥=,Šè;5màê°«ª4«uIÛ!JD—Ç‹C†ÇÇÆ¬Š¹ižÖ4µà·ý§ * ’Ú**’’¤).§í ãùò+ÍÒÐ#ÉKZ†Ä…í<ŒüÅ8¿EØèvëªÊ†Bâ6Éø(ò*·Lµ%E ˜ýëL8@Ü´,`}¯èH @ðô­ !jû¤ò•©N<2kbÖH ¸ƒâ@ãå^sOc¿¼Êâû*%*\Œ6õPò­2t’¸áè«cbxy¶ÃkJ?iIÁ>›«ŠT‚Û‰q¤µr|q[l+—&C‘[^ô:ŠÍ›AðÜOOCÅtãUÎlŽäHνMnÇ §¦Ë]µ/(EŠ—6ƒƒï¯h'žªÎk®e¶Ë6Ž3©tó€·½ICñd4¯©$,øpj¹w’—ÖÊ e¨ì$Gm^í©NyÏŽIÍq¥m¯rv¬w'5h¾Ò­k¡×$°ôÆÓ /Í2†ÂÝNÒæÑʱà è:ÓïèšiÈÙ->›„•'ë³”cÄ`qÖ—±äŒ©·{Å §Œ¨œ!ñ«UêmÒâÔ7ÇQ¡6ÃHQ” à=i¢]½fYzÈe¡´—å:’þÓ•´¡’z`ã ªëqvtA \© ²æ8ä`|(¼\ڛ액¦+ÞâBàóS½Ÿ!2¯öVÊw¸©ˆy(*Ú 8ÏÊ«ÝÚYªV5¡û–Ññ‘O­KY’¥8 20€AçRÑdû2¦ÇcÚ–`6ܶÊZBó÷PpryéZ#Û›‹IR!Æl9߬¹ï«]®¿²ñ~\—ŒŽõÔï)Ú>å]}N4pˆ¼¥ï¬QRJ\Vå$“Ó5ÝÙ]úTNÔäÙ;ð›mÁ™’ÉàI)ùšåB˜]ÍÄI Iއ@Vr}ãÉÏ¢´Á{dÒŠV ¤\ŸÚi·üV¯Â°žçf%Í >Êõ=8:Q@¢¤Ì(¢Š¢Š(Š(  
(¢€(¢Š'úD¹5hnàôræÄö½v÷¡,AÈ×Um%È—"âT|Pà÷ +¯ô”u§£ºÓÈ[ÌAj<ù-6­«SI{ið>µŽšJ®¥ß_9“{t<1„2žÌã©§îwú~M±ÒÓ›­Wߥ٦ð¶‘~Òøa®œÉdñ‚<Óô¶”_K‘•)žý m )ÁÆIÏ¥R;I,NÑÓ‹èˇ1¼)ÂüŽ£Ï¥hö³‘hФœ‘Êræy@¦´¤§fà„ $rãë[ÔVì·ß•¸>Ñ?¾¹œãƒX£cäqÞ¼U © òJ@É•Õf—& ¸ó Êv$ØÊïy“ï¡^~£ÌW h÷ÈÈæºa%A.) Ž ãš–@믴î£Óqßí2Ùf;¤1-1Ôâ=IÚ‚Žz§%$øWOnSò— RÔ»lÉm©jJ6Î=ÀS÷FqÅ)/iZá²QKc¦À”Ÿ‚¸ZêwßB¼ä7ßy)Bs€”ä+¡òÅLKˆ:p°óo ‰w¼Ž¦ÐÇXÝÎq\hmhœô—¤²Á+QN}âx£%É~\•¿!@)DOÙ Ñ^ê9ïÞvsÈQÝ’y1ZØVçÂÖìœ\ŽC® pTGr¦QeEm>”9Ÿ Q –ÎÏtãššeÙ Èj¡ÄSÈyàJŸu?3Q²Äˆ ©7Ô™­û¥¨zŸOR¶ÍUj³iÅ3kvR®O,©àë)Ù’’ *Ï#ž…·Ý¢}+mÑ—. 4ú]}½Ä)ü}Ò µ%7"îZìn·À,K€âÜÞ•µh6’Vp2LóWÞÌ,‹jû`~1rŠp4—A8ÎÞ|¸=*-/ÁÕeMSÁ¹—‚šzCIVöZQ@Ú‡@®‡Æ­ú.Òý–å)ëŒ`‰OmJ7a[r°xÆAéÖ²Œ_2/).V]ÃA’ŽùÉH}Þô¡)*ÚG@+½½ÅÕ-¨~û€ã¤¯Rk$NSμCm6Õís•qÉ#Ãá\±Ó!VÕ¢á! û›“Þ²x@Ïõ®›9j ì}C ™¥"c]Öä#  çæk–÷ëf¢…ŽÒÔcNDÕ$r;ÀÚ›Rqà•uóH­ò 6Ôd¢xã,6…%Å'ýçy“ƒâjØùin¼‡Ñ½••§Í5IBͱåäÓ£ÑýÇÓ÷h—ÛDk”ïŽúw'Ì„àAÈ©$úÒ{²Ë›–mK|Óò\ ŽÔ æ@KlüT¨O™§5TíÉã—+î&­} ¨¢Š’¡EPQ@QEQE¿·;+/Ø>›R7&N1-> ŒàÚ¼“…QKþÊæ¸žÍ ¹(…=o/EÏîÉJqøW /R£BµK‘8f2%ÀFr:c´‰·@JûF7|œ4’Nß{žO$úÕ£Ô‰ltÅ$á÷ÛågqÆå+œT&¾o¾Ñä¬!J·¥* …ž=<ª~ô†ÜîK¡ÂP¯« «=æ¢/¥3-³~<ÂàeÖÂZP)$~"­Z·©æàêK-º–ÒÚ÷R 8\õ­/mH ÜrO5Ûnm)Ž”ÈFk `r²=+l¦#¸–½Þì©=à-õùŠÁ½MÒ½ˆ­é éÈT…ªâ˜l8[[Yt) q’â° T½U µ6M™.;¡3®-JD„.*[ݰx ‰û õÍrI˜ê¢ÉD6ÄXcoÕ»ï¸á=|1é[£E’×ß¶òR\w{$m@ýløzÔK×)6´÷ï1}À<Óâ SÞLŸu¡³Øf¶Ô·Ñ,–kíætËWvèC/rI$dõ8lj­sµ¡ÓÚ¯_é÷Ÿö'.ËB[ž¬­,©!$%Iç9P$ óK­5*óh½Úï᫾Œèu8ïœ)'Ë#ίšÆí¥žÔÿN'NM›u˜BÜ1À†Rqœä¬ôôâºÝ[2å¹Rê9.ZŽD-qhaÅnÛ&ÔåÎIS}èII'Üǧé^}½j{­ÿP;x¹O.¸77¢–škõ×|êgX\gÜ4¡º(»r[î— ”•GJÉîÁçjA98ëãK˜{OD¡µçì•pGŸ5Iäu¡hãIêN1&r‹ßG‚¥‚V>ƒ§Â€¶ÞijLdîoRŒ•äƒÐ`ç"¹‘n’ãƒbR¥¨{¡*ÝL Ù¿ÒÍo3ž†ãj÷c²FVê¥ýÑYŹ"ò¨”GìRio»º‚Ñ '¼Êsàry$ù Á0b:ÂÒˆjB”6+q)O=?¾œ.öug%ïe’J};ф$ù€Nyó¬ÝÓ¶X¬°Ò¢CqÖýÖ‡•¨ŽNBs•|kUC'—]…eÖܰäFœm¤’‚=â*ëŒ “À®¨Ú]ûÙnrY… .Bd6±’:¨ul²ÂĸÊÓ#§”œ¨²sÈ>×*ÖûOH R¶©J ÂQœíNsÏZ•"¯#ØX[û7ˆ\d•8·´¡ 7ŸÙâ¬íè 7œ6ã`nJx¤õÆå}*ìË)`¾âohtÜvAQÇr¼O5ÔóQ™@~b"Çowq¹ã¼¨uN‰Í_”§7yP¶i«HØÄ ldî’‚Cg«*8<Õ‚-¹H‚Z[óÚÁmŒºâ•žvà qS@¢\Ùf;Ï8œ÷®©Ý©iôVÆ£Éq ³¥§ÝJ)'©ÏQNT…¶h 6ÚSŒ¡`oS¡ãBEpNa–KMÇrKs ÃN*ÝϺ|jq–#YPãï4 w¡²zõÎxùWsA·$­¸éP á'=zu>Y¦ÃVDŶ´vQrRž—Ró…@lèz'åÖ»\ udx“]o0q%'!CŸ,ÖQÙ* „øñëBÈ^ßÈŠïjrT—£µDw;’Ñç!Xþ5è› ñr³B›€ Í%jÀ‘Ïç^wÕ³ˆæ¶™Àƒ&÷n·¶¿Öê•ÀÓ£H:›uÆEŸ8i Hc?°“ø€ cîüß©ÝÆ?ò%ÝýQq¢Š*N`¢Š(Š(  (¢€(¢¹.rÑ ÒÈm9Ìø iÛn¥E²Á=y%¨ oRGW_ºÊŸ'$|* ßæ4íº4°}±,²ëáGéHRÇÉDÔV·l^u–›´\ʾŽö•L”¡À‘/B3ú ~b­Ž(­Å•»85xÖ¨‰ÆI)5¾Þž§"Ï#üëÂûà„©)Ü pO˜Ånyµ£éÙçåZ[PKÈP9‡Ny©3g›W Áí‘ØK&Wµ8 ÀQç¯Â°DwØ·­Î¼ëjÊ–³±¤ÿ{ª§J·êý8[¹Ýäu­”?Þ—R‘Æãúßtz ¶Zt{ˆi,2ü‡–Ž½É H>9R¸*ÖÍý¥%BÿLÙîS®KKja†\9@oP|ø)bÓDHDµÉL¹É>ê¶¡¿_Ú«]¢Å*$rÛkpn9%gzÄŽ1]îYåñ¸¤¬t­c 1”쀞íÏØe¸”¡÷ÒÏvÞ·@q×ÇÊ‘ܣɜ †&ç*f@Isæ 0kѯiÙÒ˜q¦ÚZè(%§9#Ðç ü*·s³@Ò©e7G-Vô€JL§ÚC¤zJÕùÒI¶LhXج—kªûÆ"µ*VKލ¥ ¬AÅ1,º*-¶0aHâÉ/ÜiENƒÔ }Ôü+Q×ú*R„jt‹‚¿ô¡mvSmÙŸR*N˨U¨çµ Mö’ÕâZ‚j ËyNå n÷ÈjxëáPí:«7Ǹ¹9¥]Îß…*(š£M½÷º}…Hm#jí«^Õ!¾J»¢O¼Içž*µ§»/Mòk—-«…f-²úJ\qCíDƒøÓŽß|¨T¸·ˆ µê&Õ±è $oXê[?ÃÇÃ5•Õ-ÚPó,¹&dÎïÑí4§Þld{ÊÚÔóŸ{TãMÞ…òáÉÌ¢Ó·UÖÓÙ¢Ád·[=Ø‘!àxuGðg@Âp”ÊŠ¨V56šŠÀiù“˜| ©NAP >?[ãêK…ÅîO@îæ¿â~u0ž7³^c/ÅGW¥àH÷-÷®:9V7«¯#ư ´ ¶2Úw’'÷Ö…Ýí»°‹¥¨Ÿ3-κX—Ô‚Ü»k‡ö&¶ñVŠIìÎgƒ"ÕÅù0­Å™ CJ;r¢žxϨ®ÖmÌ8B”ÊVñาI8’²Ña‘öžxý”ùŸAZ.:ÂÆÄ€Äy-Üg«Ýj­%Õ8|²2”þ5PÔ÷yP¯6ç.P‘;V¬æË¥ã+zc(ÿ¾±â:óŽž8¤§Ò;úx›bá¹k&tãÍ›îK}{Hï£Ü{Vè½//&c\Õ½¼÷ny´+Èàƒæ)ÃnÜñ'rÞR‰©9VѺqzz4Ù7ÄíEuW{v˜B×Ô4$#§¯Ãl±$}/ [Á!GÝÁÏÙ51,ieÊòäs{·üò/ÔQE@ (¢€(¢Š¢Š(«™JvëkŠæ=jRŠOE¨cü:³ÕsYZ§]aÇM©æ’Ó¡dºœ¤Ðôü*Q ó¤)Óug넎ñW­7}Z´y(*ûÃÈÝM¸Èná=Â*ÃHi/o2@Ï皸vr«„ç%Ê´½à¬oŸk¸÷ w7#WÚ´ ©Û qé[í+0å4æÓú®¥»sü"¡&¤åÛ_C§ÚÆxcŠZr¶ï}i^(°6Þðw­Huy&·©Д­¶Ùe¼c*Wx¥yôä|±P®[uSm'½·±!ß¡°çîÔ]Ê.µ}YS1XgÎÈÙBrkK9yWWëø&$®-Åèöøís Ÿ]¤ÿ ‰¼ë(Ñãnö…¦3xìLfǨ[›Gá“QQt”Çän™p»;žTÌ `+âã›—øVV„ŽÃþÓL¶dcüÜ>ï_Öx¨ƒðÅVÙzĺ7ôúnTàj»µà¦¢Í˜Œã0#­ñœø¼îƇ® ©XÑuÌ‘™—;e—‚9¸IIð8NƇÚa&Áx•Ì¥¡?²ë¥`€â»"é‡Qýl–€Æ0†ñ5 N¢S½’_/¾âýZIùhOÓÚ‡RÜÐ…4¹¢[ˆC …Fº,ÚKNÙ–¶éëK/gwz¨ýë„ùîp¨æ˜ÿÑØêþµç–:ã Väiëjq˜å_ÚY?ƦÑJeKÛå͇ÊFBRáŠâƒoLÉŽ½oƒo7…†›eo$`l+JsøÕý[jS 
œù”ç÷×SãÇ9a†Û==ÄKC”R_,nÞÙmWý7pˆôE%MÈnJT´Ùu¼«„qQ‹ÐvD’âºBqc•ÀººÞëâ¯2I§©¨ÙöˆòÂÔrú‡õ­ð~~uZóV¦&N^Ng]— œN–ÔBQaíöÊxf멈>„ðqò¨ù–®Ò;â[oA_šN=×¢†V¿>¦‚¬ÂÛèÇ B¶þ ÿξ‹,óŒÆñýtššOVQNqÛA>í§\…m“Ù®†šŸ8ÒÙ?çþÆý¢ø‡7KìN3ø˜wLŸ– ;Mšf'8KFøWÏ¡føÃ›\¡ã‹Ý/$m/<~5óbÖ G§ö9ªb㪢J[˜­LÉÓ«Xî;#Öo¬ÿ×)Á“ñ§úmwp0–\Oÿ¸βU²ôG!Ìù{Iþu_c±y³Oî<]W;óÑœ¼•¤Y;BZ=p纥×h>£³ Éý·Û'þ*i.ËvZ²¸¥\õSé?Æ·&Ãp˜Í„VÉy#Äç–®MüبHí-ÿæ÷E‘äkÿ5ežÐTœ;Ùf‘{ûø©¬lw¨ÿkïÐ7ÉaŸ›ƒùT¸Åô^HªÍ“µù±9-:¯”ËìJÖðó1÷V¦áÏsÎÂÜ=SsS£è+€<0ÿ´÷èЇõMêçüª¼ê—‘uÆq E6¾oò*áZuÓùb göåŒ8ìp™S”Ÿ GCø|jÉ¥t­«J7!vÄ<ôùCýnç1}ä©'Ç*û£Ð|êà,3Çû¶¿Ç_~€¸~£#ÿiÿ*²IhŒg9ÍóIÛ"qÓ*ê²qy„:åg?á5Øl÷ÿ¼ÿ•tÚ,s¹3"Aih“€wq•KhªLµQEC@¢Š(Š(  (¢€(¢Š£Q@QEQEEPQ@QEQEEP(¢Š¢Š(QEQEEP(¢Š¢Š(Š(  (¢€ÿÙkombu-4.1.0/docs/images/kombu.jpg0000644000175000017500000034143113130603207016574 0ustar omeromer00000000000000ÿØÿàJFIF´´ÿí+Photoshop 3.08BIM%8BIMí´´8BIM&?€8BIM 8BIM8BIMó 8BIM 8BIM' 8BIMõH/fflff/ff¡™š2Z5-8BIMøpÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿèÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿèÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿèÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿÿè8BIM@@8BIM8BIMEDSC_0124nullboundsObjcRct1Top longLeftlongBtomlongRghtlongslicesVlLsObjcslicesliceIDlonggroupIDlongoriginenum ESliceOrigin autoGeneratedTypeenum ESliceTypeImg boundsObjcRct1Top longLeftlongBtomlongRghtlongurlTEXTnullTEXTMsgeTEXTaltTagTEXTcellTextIsHTMLboolcellTextTEXT horzAlignenumESliceHorzAligndefault vertAlignenumESliceVertAligndefault bgColorTypeenumESliceBGColorTypeNone topOutsetlong leftOutsetlong bottomOutsetlong rightOutsetlong8BIM( ?ð8BIM8BIM %h  à,%LÿØÿàJFIFHHÿí Adobe_CMÿîAdobed€ÿÛ„            ÿÀ  "ÿÝ ÿÄ?   3!1AQa"q2‘¡±B#$RÁb34r‚ÑC%’Sðáñcs5¢²ƒ&D“TdE£t6ÒUâeò³„ÃÓuãóF'”¤…´•ÄÔäô¥µÅÕåõVfv†–¦¶ÆÖæö7GWgw‡—§·Ç×ç÷5!1AQaq"2‘¡±B#ÁRÑð3$bár‚’CScs4ñ%¢²ƒ&5ÂÒD“T£dEU6teâò³„ÃÓuãóF”¤…´•ÄÔäô¥µÅÕåõVfv†–¦¶ÆÖæö'7GWgw‡—§·ÇÿÚ ?õT’I%)SÈêØ×.°¶Æ€KCíÑÕŒsUÅÎ}blgë‘0ÒOµÖNÖ°Yô¾…~ô”ïÑ}Y6ê\[ĵú"áúwTº›ÜKö òÍ×€^Ñ}[Ú=JÞÊ¿Ÿ§ô_™êÛé£çu¼‡— Fe­i-s1±®m@».ቆöÂ~в”¼RAˆ ö/b’àO^ª£´2öhôÌ#s}Ÿµ¿u~âA©½EóÞ“FPÿ3 ¨æÙÿ€¡ÄÅ'À³rá%ÆŸ­#Â¼Ž¢q\u Î¥ø¤ÿk¨cb±ßصLýqÇqeL긶ZóµŒÆŒ›\O沌!›c¿³J+^½%Âfýl,µ¸ÕÛuùx«ÑÖ¶Ö÷µ£$Õ³ßîþOéÁ_f=ˆwõ–·"ÊšÌk6=ÌkŽu…Ä4–=´ô¼–VçGÐõŸ±8COSËç'Ú™Q¨™ú\1ý¿Iy뺻dƒŒè1#+!ÃäïØi›Õ_c¶WƒU„ð[nSÄÿÖúæ¥qé!ö­8²ñdàOþõô=­â$˜U¬ê½:³Èa> ;ÜÍˈ'­ÚÓéÑV1ìYÿ£ºÞ^7ùÿ±°¤ü¯aÙ×6ЖÔßì³£`ôÊýoÖQ´pñ£Åþ%ñ½u{ „‡Ù¸Ã`mÿ×MkIyÞ=¹uuâæ9ö½µcdÞæ¾ÆØZûq©}̵õ|ÿ³ä}ƒ1Ô3*œÊ¬éùÕ0½-:"Q1$⤒I$?ÿÐõT’I%)s½'÷ÝòÆ³Ò hkž÷=Ûö3š[e¶ÜýŸÍ±ŸÍþ›ùºl]ã>¹å_™S*-ôò*Ìõ˜ö1íwÙðß›¹¶µÿÍe2»’º³Ù"Œl*š2¯®KÜç8»—·fG«[²òÿ£z_§÷ß“êúhùyöbgcô½Ù/ºÀÚØÚ6†KëõíÌ̦üŒÜ¯géßW£Oói—Þáúü¾–ÏYpU,~勃ùßÖKåùYÁúìÝ+ò1péd¾¬jö—†€èÿESZl»þ³[ÖG^ÀɰÓÓºCz• ºÊ?;Ъ«oÛü§ÛBÍ¿Õw[ɪ¶ž¨úoÝê¼U&¦·ý¦Ë¨²¯E¿Í?&ßKOÔÿƒM™õ¸ÂÃ…ax 9ã Çã`³nà]eÔìËê¶{+Ýv?ÙqÿÐgzi¼sé×ùß+8׌g“õ’œxâ8Ž(úµê¡úÿü2x Ï ¯õ{2룤ÓF-׸5¬Æ¬µ’8ÖçÙ^Öý'¿ÓZ.=7Ž£Õú=kqN6 6=…ÕÛKÝ]t3m÷1Œ¯?"êŸú,U\úÓë«#}uƒÒ5±ûÚC}oeõÑ›–ë?ã³ùj¾7^›±mÊ¶Š zã×úBÆŠÃ,u>u6Ìjýo[×ýÚ=Øã±)_§üVLœîD±cŒ+ô§s7þÃ5fQ‡Ózö3˜=>˜n¢æX7<0³í-‡:ß³Ûë¶Æ³ÿQ®ìßm€>·¸±ÂCØòö¸CØöûv;óW1„ú2ê62ëÂm™œ·ÔªêªÝU×3Ñè2·]é D³6œ{œûrY,¶›Ÿ|9‚ß´Scí²ÏIÔ2–Wú6ÿ§RbÆ`eUê7C£SœæaÌG—ž(ðqËÕîZ_¥ÇûîûŸqåîøî?Þ˜ ì.yýÙ%bcugØÆÝe,k)·fuÂöŠi5ÕXûrM~›ÔÜÏÒ¿ØÏæýK©JŒÞ™Ô®·¨3&Æ9ÎcÜíÎnÙöÕeUú¾ž×ï~+²,ü÷©HÆ'°ú;ŸfµƒsÛéû?êö¡ºì`}V»Ói{Û\ØàÑôéÒÛÿì,q‡‹è>ÆWêX…¯sËþsšáíÿ¢­UK1ÁÈôZiÛ‘MŒÚ××ö†²©²Í̶¼ÑzßÏÁþ‘ =™N8€}D‘}?wü&·WËeìc°=C‘E´ÑRú^-~ONÌÁýS*³ÑõXÝö‚ûgó¤^Š¼Ï¡ôæ6î‹Òë{Ÿ]×c\dK+9]ÒßðNõÑ=Z¿ÁÞý‹ÓOÙéDôáîÇþŸëGþ”RI$ŠÇÿÑõT’I%)q¿\)±ÙŒË›êÂeí~;YcÝc²èf%_ÑýÔÑSü«·7ú ¿Kzì—õ²_õ“°Hýä¸;CúkÜÖÿ_ÒôÝÿbGb<@ÔâOB é xºò®ÁêØÙYÕZ\lf]áí5¾À^ë=F±Þ›]úBë¿Ñúß¡ÿ¯äuRqñr/Äé¸lu̶“i¼¶ß²PÀIsý/Õ±Ùoèêýc*ú(M×ñ=|ü{ÿF‘E¶dä\Ú«eÖ>ëÞßÎÛê~ޝð×>œzÿ\ßPËW·Ò¯æaÑa«c¾—mö»3/ùÌËô¿Õ詊ÀñJ$úÿº9¹Œ^Ö,œ æ”MGæÅ‡‡øðÆØ¿©ºú†=57›±Ü, Ï©vÖý¶öþfæ}“þÑãÕüå•Ú›p‚w±íbgP(t‰ 2eøhýÝQÙ4.—4óPÖj˜ éJR&R&R:’u.ïÖç1ÕôÚƒë¹Î«×–n$6ºDú¬­ß¥ýʼn•˜÷·kjÆ vNEÎôñê`;ûïpöU¿¥U¬dÿƒcÕ®¿c¾ÑŒ$¼ Ü’ÚÙHqoÙ[pk+Æ·"ÿNª°YúªÞÿÐ} 
—;•ÓGHëíé½NÐi¢úF]¸åÆh¥sßO·Õþo/ëNʏ=&ÇW]`¶¬ª²š}»ÉÔdï±ÙVý¾»™ëfÛú_²zT[ú7-i}wéXñòŸQ®:M:iich­–Kñ风k©·Ò«mÿ¡õ?Eú[ž²3z7Réx3©Ycký¢ øn©îõ«,]U¶CYèÛ²ú®«Ñ¶Äo¬}?1¶á[vGí+óq€¯$;Ô{ŽÿA”½ûžç\ÆÙOýnÊÖ·×ü¬¶âtli#£Öê^K\Øym²¶ú®{[‹Cý_æßë~‹ÔH¨7:‡YÏ·¥õÛ1_½µ†×êíú»êzmôÿÑ®Ñr_RË•ŸfácÜúØç6f>6Mãp/÷ý«©d}Ö¢6j¦nF¶£ýÈúaÿ5I$’K_ÿÒõT’I%)pÿ[$}gé®,?þ̯þ‹®Þ»…Ãýi“õ§¥´~s²ø5½*ÓÿP’Fï;õÄ_û&ÇVU•»½›ìw§§ølýfÏô™‡úÇ`!w_[˜O@Ë#óM6OõnªOý5çî€ð¦Íø$T wÍ…Ö76i!áÌ÷7÷_ôŒwÔZí`“ígCíoþMîT1Þ, ã:[îÚ~Žï¢¬b½»ËIŒ>ý53Ûóšï£ù¯JÐÚ¾‹Ñh­®kÎæ¹¦O;ÜͳôÛcÿëŸöâi²ÊÚF€7OtþcºÇoÿEµÛ¨I ~à ^KŸ¹¤~Ú:¯Íÿƒö+y½FæâÐvn\"¶5£WÛuŒÛ²¦}/z¥{ªé™¾•¹O·' ª±ÍØõ3%·6¦šñ‘G­‹mYÔÓé²Ý—ß]•ÑúVb½Yè¸_W}M¶Œœøe—_‘š+¦†WC~Õm¬é¸ï¾Ì—ì«ÓgÛr~ÏúOÒTõiÙ½¤SÓp²0ÙwNêuYv~e¬pÉk ĦÜ&Fü_²úmÌú>µŸðvªY¶tþ‹Ô«§©ÑÔ3²ñŸ…ŽÜV›Ú®{ ¹Y9m?e¢Ë1iueÇõ½ö'jµÒÃô¾²u¬Î­¸Ø‚ë±÷´½–âú¹ôc±Œm v5µÙNS]éUWè/Åú~šT¯ ýgê”ædâ]ÓqÛöÌwc4ÞêC挆±ù×ú?[Ðõ}{ÿGèþŽ¿ð¼¾'XêX˜Y]?Á^>yýd4½ÍØê]PµÛ]o©ÞÿOßÿ´~§sz†3É,¿¥æÖþþßM®ú_KbVªK—Ôj/¡ØôzX˜5Œl,wí±Ûçö·ÑûfUöYuûéÔÿêz‹¬Ç¹™&‹±LÅ·cêÄÜæ^=;½7¿Ðº»vÿ×+·ùÅÁÃm µ¤n± êçc][Ou³ßþ Ÿ¤ÿŒÜú§Ôm9¸]4W[FU÷Ye¡¤<ì¦Ç2–Gø v²ºÿÃT˜%­w_Ã¥ö·¹ú—hõ_ŽÙÛ[z{‹LÕU·}›ô/¾Æaþ–ÏûsÒ³}Uõ˃úˆöý®­ÌªÜ:Uo-qrìe~¥¢»e7Uúvÿ=þœ|Ÿ^Š»Äå¥I$’JÿÓõT’I%)pÿYH?\:HçúqÿÙ|¿ôš»…Á}dqÿž}4ƒ£nïúóñqÿM+¯´.€³^ûxtjõêýn“Ôj?‹iÙgª?éV¼Ì{ˆ#¸šYz½Íe§Óx–XoÉxÚî?¬¼É´Úç²&ÀvGƒ°ÿU°1Çý,ü×=’6ÀÓ0w'ØŠluTK@pŸÑé"Oµû¿skTA5¹µm"ËÐ5ÜékòÒÅ£caõ'à7© n¶†¶ä2 kyôë©»+¥¾Úë~Ï[Óºßç+Þ›ºæ8¯}®e.¶–8ÖÆm2}*ëK«¿ãjý_¤[VYF 74nvÛZÇ®Û^ÑèîÿõŸc?ªîú±–ænê¹]*칸ÝFñ]ïÔÔÖXüVÿ*ÏKþ-˜[§u n‹“èÙ‘}Sx;ñÜm>yÛúOJ·3ó™gü)B ´¹]7¯àôæô£]ÙøÛ~%–¶«Ks²qò)õK[me›7~e¿£XNè¹5ôìŒ÷]‹éáÝ^=•W{n³}Þêö}—Õ£Óú¥õÿÁÛ[?K[×Oûa½G§uÞÒè?ÂæÔãfãÐM]AÙ–0V×dhËýÛüÇê•ÒõÈ~×m´œ:©²š®{-4Uµí}­º·ÓªÏѱߡ«ô¾šrŸêoNúºîžþ«Ö¾ÎK2ßK>Ù`†TÊl%¸{êû]¿§³ý?üR©õ­t,L×bUÓ²›ûOvÉvPuõÓïAÍ®ƒŒÌWèÂû?þsÔ?Tº‹«¯7«:ž‹†[üþy¼´~f6ö̇íüÇz>¢7Mé]/§õ¥5}‡Ñc½îcgÚ,ýJjõLQƒ~N Þû?&Ì}àsZíàÐçnw±öÂØõR×þßÂpCcàÏ£{˜ö·ó+²n¸:Üç2l³&÷2ç9÷NŸÊ·éÿ!]ú²}>¯‰kÌ1Žp{Ž€4ÖümßÕm—WïQX°z[-#Íô« ªŽ·UTµµÔ~ÒÖÔÈ ic†•T×>º>“÷Ó_£úk-³ìØö[jí× Óê¾°tñnæ¹×d±¡Ó0[º±­—~k}ŸÌÿáL?èõ÷JNëÀ÷·…I$’HÿÔõT’I%)p}xùÜÙ‰ôüibú•Þ.ë ‡üñk8pťÕvSÿð<$“˜Ç‹ü_SKªõzze,¶ÊÝsî°¶šší“é±¶X÷ÜYw§±¿ðVo±pù–¾û­È,âëcÐt.sŸíßúG¶¿ß]7ÖæÙø®srÝîð®Æ¿üí\½¯ —H†–:L4~îóìÚßk?‘Ý©&=¾V>3‰snºªê,±Œ²ü¯Qjä}dÏXóí[znG#ŸB±g§[^6+}<|{Úªc2}L_Kù×äþýo«Í6uþ "¼Šîxq ¼w}ª÷½ö­mTQeÏþ¢­kU¿*¬ìVVE™8¹/hÈ5ïy§Ð¡ÔÞæeé·ôÔã¿ÏSÕ³!²ì:«*vÛ°ºmõbYqoíL»u·=Þ¯èÝlQ‚ÿ£ê:ª1ï³Ôgér×WÕó~¦ôŽ£† Ù]ëŸ[,>ëqë±¾ÜQ¶×cé}TÕô2ªªý/¤¸ªú›ro³3¬¶î«•´6ŸZ÷í‘»L—Ö¬©»¿GV6F7üjê>¦SÖrú­aÖ=¬Å¦ÆS[@ýa•µûºN+¶Õe>ÿÖ=Ñãÿ9ê}²ÔBˆkõŒÚz_\«ë3›Ôžsð3+±õÜÒÿé¸ÌþÕÅÉu´äãÛFEO­ÿèm]Nêݘuætë(èg*Í®êâ±Íu¸WºÏZÞ•eŸNª½O±z^üL¿ç«¢–U]4toVºw@ê/]Ž×_‰™·Üìw;k~ÝKÁ¿Ó«©âÿÝéûúgYè4YŸ…éu^ƒ–ÐÛo¬qm¬c+ΣÛ~Õnoó¾†F%ÿÑïõ´SÑÛõ[?"Ó‹™‰Ôl±îýyù °¹­mEž­·zîo½öÿG²ÝŸ¢ÿH‡WIé9¹ÕtÜŽ½ëu›Xöc ~†·~…–åCØö5Ô~ ‡ãÿ£ÿGêbu¬îŸÖ:N%˜¸ÍÃn-yý¥Ö†¿}9Þ¯­kŸu޾·dúޱÿàÞƒõK÷õzÕçÐéÝÆeæä‘íý ›mXõêÆ¿+-û)®¶»~÷¦ÐµÚÓcªÑ¹ÕçW[i¯:¹4°mmy-wÙzž3汬Íõ/Ù»ôx¹¸‹GêÕ5XìÕºÖš›P óßëI|³ú;ôšÌÀËwQn~.C™S²ŸoRÄÜ@`ËÆÜVZ÷mgÛ1~/þÆÀz龬㚰÷=\—Ĉ;kmt Ít9›lmÞÄÞP컋Óâêá†ÕÔºi'mþ›O$Of¿{W¡/6mngF-[šÆàM¸ô»ÿe«ÒQ—EKhùý)5$’H­ÿÕõT’I%)yÿÖ=>·ƒûØÔýšÊaÿ£bôç¿Z ÿuÆ›© –w»þ‰&;¸_[ÜÒ1ÞîH?3]ä cœ`Ï–«±úâÑû…ÃÚ̪I6ÞÕÄú¤=€6w„ŽèÏÕÚýn©f-c~VGOΣ²7YŽöÓ[\ã·}Ÿ¤c=Ë;§ô¬v>:3ÍÑí·3û)§{þÓéà±»œ»=–QC?Kú?çúG@ÊêæÛmX¸Ø…¯ËÎÉvʪŸÞ³é:Û?ÀÖÏü_úÅõ“ ôN—Ôrº§¯füìü’ý¦¶íuXX,É}—Wê·Õ¿÷ý:ÿMr!Y_V×1¿lý^®âçS™Ž@£~Ml©Ù9}4Y²·âdÙw®ÌWý–úÀ〦ïÕª½v¼ün±Ôq瑩”èe¶mu®_F%oµïn.'¬ïÓ[g§G§ü»‘×¾°ât,|lzñðëuŸ¯¾©6Úâë쪻òŸ?hôÙµŒ®Wô?Î"ç}TúÇ‘ún¿Õ0è´9ÍiêY³½lkFEëÝüÝ.ö$FÅ@î4v:nNe§¨YŸÒz­†¼›±-õ™ö—¸»Ó/ ýýo~+öXʯg£ôòÿI–z‡ÕΣe½7=ôÚïkŸPhÖ¦äTíÔdûþ©ðhÁè¸Kö¶'Qv~8Çn&'¨æ‹}FYV}Ö>¶Uúƒ}K)óž§èÑëÊ£«â³:}V5ÒaáàG­NØsj·w«ïßU9¶?ó‚TAý¨ºgÖ lŒK7¥ô«ísØßYØ¡Ž{‹Ç¨ë)¦ÆãØÿDä} ŸTòºÆOWoØ3\*ǨXêñ¨kqè¡Õk¾ŒjÿFëœý%ÈÈÙúÿœYlôpóêeU½§xîÀ~Cú¾å­Ñ©¨}f7\H¯˲·ÕÁÌ%Øî©Žk¿Z³ß{šM$š4¸,´1®Ëé]J«1í_‹qÙd×´¢XïJÇcf5ߤ¯ü&5»,^‘ˆÜ_Ùø·bîf.EBú˜òlubÂël¢Ûcßök³Ô³ü¤¼ß=¶3!®{÷XÏQ¶8Kšë2s¿•kŸk—£àµÕ`׃h eWí~°+©•]ê4µŽu{ý®w­ÿmý£ª%£>›@ËúÁÊn~kÁÒ6î³_Ïuþ­¿ú ½rS(ªÜ¼ŒÛœgÑu ¼m 
¬¼Á×úONÊÿîVGúUפ“ÓÁI$’HÿÖõT’I%)pu¦Ï®Qù¸Øt*Ër­×û8Õ¹wë€Í"ï®á\Ñ[÷}!NN­s¿9êge,úl¿íi˜‹¿îÏþl%'ë}uþÅ·t—±Ô¹†æÖæÏÐÝú_¢¸G3c¹ÇQÉwÿX‹méÙõ‡7x¤½xs½zï–ÇÙìfÊ·×ÿª×ÿç €ö’@Ó°>Ãÿ’D­Ww¢ãaõ®ŸgÕ{s>ÓQ«&‹Mn°XE6cÙ@Û±¬±Žý%~½•oY_WzE9=uíË®ÏÙý0Û•ÔclbÍ–U};†Ë¯{kÆu{¾Š.sÇ ¤»x$:G¸;svísßfÕ×áõ̾¦Ü¾…em¿? ìõu"Ö×uÙ 7ì•õ †–¶Êlh·›^ßQž¯¿¬•ê¢ó9k3/'')åù™Ue›7Öº‘”ÏN¶ïÞßnUU׳Ù]ÿÅ®“ê×Ö^«×>°ÕV^Tu̺‡±…•ŸIö²8 ìe/g±Ž}µÿ„þ‘úuœÏª—SÑ볨cdcgÙ•{«­ÂÜlLksnô¨°µ›²¯gµïÿ¸ß ÿ„ÞÅèù8_oúÅsM}:‹³32®wól­í¸UewÑäý©·Ðìzéý?«öŒ{ªý_ÔEÞJ‡M,yqS®èwmÞäÆïZÿD—ÍÛY² yh7½¿IŸÍÿ8ˆÊ´‹ö9 0´7pÝîuô¶îÙÿ[@¯–8Ömáõ4Zšô_pú\×ÑÂÁ±C©> Ó;‡=¢Ë+º¶fÖXÊÛëYkHØÝ‡ÓÚÿm™EV×úë§Ðô-EéöT1º“ˆ{®È¤Sú ÚØûëe®;qÙú+«éúûì©e¿k,‡¸¿H7q'—{–Îéµ`áQM¬Ä9ùY¡}ZàiÞÆþ—¿¥éWê9ÿž?%‡óM{™ÔzžÊÜ«è×½šä·Ö{[IödXßøÏWbï+®Ê {1™\†0{v¶-s«i{«úôòú¯ø}Oäru .¤}£ «æ¼¦¸€6öY{êz/¶–=˜ïÿ·2úUÚµ¬c¿D4÷™cciqÚçä:Ç·Ô}¶z?õÏðhêÙî†×QÓ¯¡ûKz•RöËŸF5ƒw·ôwÑ‘ê/H\Â9•ÝFCo×v=³¸×dNïWOY•äUêzŸð‹¯è]IýK§2ûš+ÉauYUŽmdÕnߥìÞßg¹.§Åv†1®ƒ†_ÞÆ×ú [ô÷VïÒ·ÕB])ën ,Ë®Ýíc™´8›K¶ˆúm5Ÿ§þÜ•ù@4›,{[`s@;žx‹+V~•þ÷o¾ŸÑW½z/Kúµö:™c«¶Ü–;Ôtíc‰ÜÝŒô÷3óö~üÖ‘Ôòýì±øøô5Þ­Ù-.l¹ÍÚæYg§é»oç¾ßKþ .ñkÑåð:¶M¢Î iÎÂÌu»8Üg'`:ª]X·%î¯Ý^ÌÓT˽z?Ò"ýjúÅ‘—–ÌZk1Ÿa΢·<€E¥¸»*oÐô= 2XÛvYëå¿ô¿·Ó1:3Ø>Ù_QͶ³Møoxsôô±è~U_§»óþËU÷€§#ùÊÔnú·Ò2ìÎÛ•Ó«sŸé³ØÑ`ÖúooÚqÿáú]á}Jši¯Óz³IáëWå«É`aõ¡xÄÅÇkÞw"ºš ×sí{ZÆ;óö9þ‰h;ê–SCÝg¡[vûaߺ}¾ÈǯÐÚÇ~‰¬ÿºÜ[:~=,ª¼œV4*®Ú˜öúM±ÏþR;EŽý«yŸicµ<ýRˆíS‰£ï ÿ¤òõýX¢Ûœl±ÖV÷¶Y³gÑ ¹öÿ;¶½Ï¦Êö_üçø5£‰õs¤ÐÇãP\%×4<¨Þ÷Üoc?{ß讋Îúu;o%ïèu’÷¢¿/§Uú+2±j-à:êšGö ;Aà° Kå¹yzœ¬~›\zLiÜÈ¥ŒˆŸIßÍÓ±ŸCù×ûÿàÕ£†Ëk‚ÆCY{†›ÝéúnfË75ßá?óâ¶zK#ÝŸŠí5ý=Fε›±l‡×uV´Çж³ vÜÇ9 AÙ'À¹D1MjñšÃé1¡²éporNç;ûn~ôÿVrYWZÈk]X×1Ú ^üÞ­f+÷Ï»ôu\Ïûo÷ý—Ô1«e§!ž¥P×\ȱ”—Œ}›7ú·µÿÑzu~¦n]þ^¥êZ›¢tï²}6ì·"ú^k1¾ª¨cac\àçVüšêe·æz?£ûv^Oú4,G‡í]\0×BeõDeÿ²I$CÿÐõT’I%5z˜³ìouuºç1Ì¢ÈÜð×µï¯ÞZß{Z¸Î›ƒÖº{ 2±,ϦUöšÚæ>Çc›},Œ–^=jr]‡c1¿›Ë¢ëh¯õú«]êI$/úˆïãÿ¹y;3 $áe=ð zÝý§»ÛS[ùþŠ£2:ÞX5âPÌV%ÄØò<=.žË+Ùü‹¬­vé#hCóÿ¤òÝ©XA¿'6ßäÖï±Ö>×c²Ûÿ[Ë¥\«ê¶=v‹™‰G¬5ä~žÙÿÃM˽¿öòèÒJÔurdåZæ ›ZêÚI\áàê`ýÿ”«e}TĽζöUŸk¶—¿"ªÅŽÙ»géieMß´ìúÿ]o¤‚¶ºê(ù<‰èôÅU;¨âm1¶œ«ÜÈ›è滨âìþ£6!;ê¾ñ]”÷vu˜Ø7GÇÕé;¿é.Í$Ŧ2”~YÿwG‚wÕ©ýI <îéEß犪gþ­Qõ"¦lfNkc½X¸XãüÖtÛ?ê×f’UÁ'&I 3‘&ÞHtLÁÎoRw“›ˆáÿ‚t´ ~«›]¹Öä8ø¿ §¼ÿžîšÕÚ$‘î-”ãòÈÇÈÓÊbô1k/d_‘Hshºø&¦¼=˜xôW‹ƒƒ½­÷¿«ßþ\«'Ö«ô/=†K`Òïú!o¤­"Í’¤’I¿ÿÙ8BIM!UAdobe PhotoshopAdobe Photoshop CS28BIMÿá)ExifMM* † ˜¢ª(1²2·iähNIKON CORPORATIONNIKON D50´´Adobe Photoshop CS2 Windows2008:03:24 12:01:10&‚š²‚ºˆ"0221ÂÖ‘‘ê’ ò’ú’’’ ’ ’†, ’80’‘80’’80 0100    H¢£££6¤¤¤¤>¤.¤¤¤¤ ¤ ¤  â8 2008:03:05 13:23:212008:03:05 13:23:21* 6 ASCII R980100¶¾(Æ%LHHÿØÿàJFIFHHÿí Adobe_CMÿîAdobed€ÿÛ„            ÿÀ  "ÿÝ ÿÄ?   
3!1AQa"q2‘¡±B#$RÁb34r‚ÑC%’Sðáñcs5¢²ƒ&D“TdE£t6ÒUâeò³„ÃÓuãóF'”¤…´•ÄÔäô¥µÅÕåõVfv†–¦¶ÆÖæö7GWgw‡—§·Ç×ç÷5!1AQaq"2‘¡±B#ÁRÑð3$bár‚’CScs4ñ%¢²ƒ&5ÂÒD“T£dEU6teâò³„ÃÓuãóF”¤…´•ÄÔäô¥µÅÕåõVfv†–¦¶ÆÖæö'7GWgw‡—§·ÇÿÚ ?õT’I%)SÈêØ×.°¶Æ€KCíÑÕŒsUÅÎ}blgë‘0ÒOµÖNÖ°Yô¾…~ô”ïÑ}Y6ê\[ĵú"áúwTº›ÜKö òÍ×€^Ñ}[Ú=JÞÊ¿Ÿ§ô_™êÛé£çu¼‡— Fe­i-s1±®m@».ቆöÂ~в”¼RAˆ ö/b’àO^ª£´2öhôÌ#s}Ÿµ¿u~âA©½EóÞ“FPÿ3 ¨æÙÿ€¡ÄÅ'À³rá%ÆŸ­#Â¼Ž¢q\u Î¥ø¤ÿk¨cb±ßصLýqÇqeL긶ZóµŒÆŒ›\O沌!›c¿³J+^½%Âfýl,µ¸ÕÛuùx«ÑÖ¶Ö÷µ£$Õ³ßîþOéÁ_f=ˆwõ–·"ÊšÌk6=ÌkŽu…Ä4–=´ô¼–VçGÐõŸ±8COSËç'Ú™Q¨™ú\1ý¿Iy뺻dƒŒè1#+!ÃäïØi›Õ_c¶WƒU„ð[nSÄÿÖúæ¥qé!ö­8²ñdàOþõô=­â$˜U¬ê½:³Èa> ;ÜÍˈ'­ÚÓéÑV1ìYÿ£ºÞ^7ùÿ±°¤ü¯aÙ×6ЖÔßì³£`ôÊýoÖQ´pñ£Åþ%ñ½u{ „‡Ù¸Ã`mÿ×MkIyÞ=¹uuâæ9ö½µcdÞæ¾ÆØZûq©}̵õ|ÿ³ä}ƒ1Ô3*œÊ¬éùÕ0½-:"Q1$⤒I$?ÿÐõT’I%)s½'÷ÝòÆ³Ò hkž÷=Ûö3š[e¶ÜýŸÍ±ŸÍþ›ùºl]ã>¹å_™S*-ôò*Ìõ˜ö1íwÙðß›¹¶µÿÍe2»’º³Ù"Œl*š2¯®KÜç8»—·fG«[²òÿ£z_§÷ß“êúhùyöbgcô½Ù/ºÀÚØÚ6†KëõíÌ̦üŒÜ¯géßW£Oói—Þáúü¾–ÏYpU,~勃ùßÖKåùYÁúìÝ+ò1péd¾¬jö—†€èÿESZl»þ³[ÖG^ÀɰÓÓºCz• ºÊ?;Ъ«oÛü§ÛBÍ¿Õw[ɪ¶ž¨úoÝê¼U&¦·ý¦Ë¨²¯E¿Í?&ßKOÔÿƒM™õ¸ÂÃ…ax 9ã Çã`³nà]eÔìËê¶{+Ýv?ÙqÿÐgzi¼sé×ùß+8׌g“õ’œxâ8Ž(úµê¡úÿü2x Ï ¯õ{2룤ÓF-׸5¬Æ¬µ’8ÖçÙ^Öý'¿ÓZ.=7Ž£Õú=kqN6 6=…ÕÛKÝ]t3m÷1Œ¯?"êŸú,U\úÓë«#}uƒÒ5±ûÚC}oeõÑ›–ë?ã³ùj¾7^›±mÊ¶Š zã×úBÆŠÃ,u>u6Ìjýo[×ýÚ=Øã±)_§üVLœîD±cŒ+ô§s7þÃ5fQ‡Ózö3˜=>˜n¢æX7<0³í-‡:ß³Ûë¶Æ³ÿQ®ìßm€>·¸±ÂCØòö¸CØöûv;óW1„ú2ê62ëÂm™œ·ÔªêªÝU×3Ñè2·]é D³6œ{œûrY,¶›Ÿ|9‚ß´Scí²ÏIÔ2–Wú6ÿ§RbÆ`eUê7C£SœæaÌG—ž(ðqËÕîZ_¥ÇûîûŸqåîøî?Þ˜ ì.yýÙ%bcugØÆÝe,k)·fuÂöŠi5ÕXûrM~›ÔÜÏÒ¿ØÏæýK©JŒÞ™Ô®·¨3&Æ9ÎcÜíÎnÙöÕeUú¾ž×ï~+²,ü÷©HÆ'°ú;ŸfµƒsÛéû?êö¡ºì`}V»Ói{Û\ØàÑôéÒÛÿì,q‡‹è>ÆWêX…¯sËþsšáíÿ¢­UK1ÁÈôZiÛ‘MŒÚ××ö†²©²Í̶¼ÑzßÏÁþ‘ =™N8€}D‘}?wü&·WËeìc°=C‘E´ÑRú^-~ONÌÁýS*³ÑõXÝö‚ûgó¤^Š¼Ï¡ôæ6î‹Òë{Ÿ]×c\dK+9]ÒßðNõÑ=Z¿ÁÞý‹ÓOÙéDôáîÇþŸëGþ”RI$ŠÇÿÑõT’I%)q¿\)±ÙŒË›êÂeí~;YcÝc²èf%_ÑýÔÑSü«·7ú ¿Kzì—õ²_õ“°Hýä¸;CúkÜÖÿ_ÒôÝÿbGb<@ÔâOB é xºò®ÁêØÙYÕZ\lf]áí5¾À^ë=F±Þ›]úBë¿Ñúß¡ÿ¯äuRqñr/Äé¸lu̶“i¼¶ß²PÀIsý/Õ±Ùoèêýc*ú(M×ñ=|ü{ÿF‘E¶dä\Ú«eÖ>ëÞßÎÛê~ޝð×>œzÿ\ßPËW·Ò¯æaÑa«c¾—mö»3/ùÌËô¿Õ詊ÀñJ$úÿº9¹Œ^Ö,œ æ”MGæÅ‡‡øðÆØ¿©ºú†=57›±Ü, Ï©vÖý¶öþfæ}“þÑãÕüå•Ú›p‚w±íbgP(t‰ 2eøhýÝQÙ4.—4óPÖj˜ éJR&R&R:’u.ïÖç1ÕôÚƒë¹Î«×–n$6ºDú¬­ß¥ýʼn•˜÷·kjÆ vNEÎôñê`;ûïpöU¿¥U¬dÿƒcÕ®¿c¾ÑŒ$¼ Ü’ÚÙHqoÙ[pk+Æ·"ÿNª°YúªÞÿÐ} —;•ÓGHëíé½NÐi¢úF]¸åÆh¥sßO·Õþo/ëNʏ=&ÇW]`¶¬ª²š}»ÉÔdï±ÙVý¾»™ëfÛú_²zT[ú7-i}wéXñòŸQ®:M:iich­–Kñ风k©·Ò«mÿ¡õ?Eú[ž²3z7Réx3©Ycký¢ øn©îõ«,]U¶CYèÛ²ú®«Ñ¶Äo¬}?1¶á[vGí+óq€¯$;Ô{ŽÿA”½ûžç\ÆÙOýnÊÖ·×ü¬¶âtli#£Öê^K\Øym²¶ú®{[‹Cý_æßë~‹ÔH¨7:‡YÏ·¥õÛ1_½µ†×êíú»êzmôÿÑ®Ñr_RË•ŸfácÜúØç6f>6Mãp/÷ý«©d}Ö¢6j¦nF¶£ýÈúaÿ5I$’K_ÿÒõT’I%)pÿ[$}gé®,?þ̯þ‹®Þ»…Ãýi“õ§¥´~s²ø5½*ÓÿP’Fï;õÄ_û&ÇVU•»½›ìw§§ølýfÏô™‡úÇ`!w_[˜O@Ë#óM6OõnªOý5çî€ð¦Íø$T wÍ…Ö76i!áÌ÷7÷_ôŒwÔZí`“ígCíoþMîT1Þ, ã:[îÚ~Žï¢¬b½»ËIŒ>ý53Ûóšï£ù¯JÐÚ¾‹Ñh­®kÎæ¹¦O;ÜͳôÛcÿëŸöâi²ÊÚF€7OtþcºÇoÿEµÛ¨I ~à ^KŸ¹¤~Ú:¯Íÿƒö+y½FæâÐvn\"¶5£WÛuŒÛ²¦}/z¥{ªé™¾•¹O·' ª±ÍØõ3%·6¦šñ‘G­‹mYÔÓé²Ý—ß]•ÑúVb½Yè¸_W}M¶Œœøe—_‘š+¦†WC~Õm¬é¸ï¾Ì—ì«ÓgÛr~ÏúOÒTõiÙ½¤SÓp²0ÙwNêuYv~e¬pÉk ĦÜ&Fü_²úmÌú>µŸðvªY¶tþ‹Ô«§©ÑÔ3²ñŸ…ŽÜV›Ú®{ ¹Y9m?e¢Ë1iueÇõ½ö'jµÒÃô¾²u¬Î­¸Ø‚ë±÷´½–âú¹ôc±Œm v5µÙNS]éUWè/Åú~šT¯ ýgê”ædâ]ÓqÛöÌwc4ÞêC挆±ù×ú?[Ðõ}{ÿGèþŽ¿ð¼¾'XêX˜Y]?Á^>yýd4½ÍØê]PµÛ]o©ÞÿOßÿ´~§sz†3É,¿¥æÖþþßM®ú_KbVªK—Ôj/¡ØôzX˜5Œl,wí±Ûçö·ÑûfUöYuûéÔÿêz‹¬Ç¹™&‹±LÅ·cêÄÜæ^=;½7¿Ðº»vÿ×+·ùÅÁÃm µ¤n± êçc][Ou³ßþ Ÿ¤ÿŒÜú§Ôm9¸]4W[FU÷Ye¡¤<ì¦Ç2–Gø v²ºÿÃT˜%­w_Ã¥ö·¹ú—hõ_ŽÙÛ[z{‹LÕU·}›ô/¾Æaþ–ÏûsÒ³}Uõ˃úˆöý®­ÌªÜ:Uo-qrìe~¥¢»e7Uúvÿ=þœ|Ÿ^Š»Äå¥I$’JÿÓõT’I%)pÿYH?\:HçúqÿÙ|¿ôš»…Á}dqÿž}4ƒ£nïúóñqÿM+¯´.€³^ûxtjõêýn“Ôj?‹iÙgª?éV¼Ì{ˆ#¸šYz½Íe§Óx–XoÉxÚî?¬¼É´Úç²&ÀvGƒ°ÿU°1Çý,ü×=’6ÀÓ0w'ØŠluTK@pŸÑé"Oµû¿skTA5¹µm"ËÐ5ÜékòÒÅ£caõ'à7© n¶†¶ä2 kyôë©»+¥¾Úë~Ï[Óºßç+Þ›ºæ8¯}®e.¶–8ÖÆm2}*ëK«¿ãjý_¤[VYF 74nvÛZÇ®Û^ÑèîÿõŸc?ªîú±–ænê¹]*칸ÝFñ]ïÔÔÖXüVÿ*ÏKþ-˜[§u n‹“èÙ‘}Sx;ñÜm>yÛúOJ·3ó™gü)B 
´¹]7¯àôæô£]ÙøÛ~%–¶«Ks²qò)õK[me›7~e¿£XNè¹5ôìŒ÷]‹éáÝ^=•W{n³}Þêö}—Õ£Óú¥õÿÁÛ[?K[×Oûa½G§uÞÒè?ÂæÔãfãÐM]AÙ–0V×dhËýÛüÇê•ÒõÈ~×m´œ:©²š®{-4Uµí}­º·ÓªÏѱߡ«ô¾šrŸêoNúºîžþ«Ö¾ÎK2ßK>Ù`†TÊl%¸{êû]¿§³ý?üR©õ­t,L×bUÓ²›ûOvÉvPuõÓïAÍ®ƒŒÌWèÂû?þsÔ?Tº‹«¯7«:ž‹†[üþy¼´~f6ö̇íüÇz>¢7Mé]/§õ¥5}‡Ñc½îcgÚ,ýJjõLQƒ~N Þû?&Ì}àsZíàÐçnw±öÂØõR×þßÂpCcàÏ£{˜ö·ó+²n¸:Üç2l³&÷2ç9÷NŸÊ·éÿ!]ú²}>¯‰kÌ1Žp{Ž€4ÖümßÕm—WïQX°z[-#Íô« ªŽ·UTµµÔ~ÒÖÔÈ ic†•T×>º>“÷Ó_£úk-³ìØö[jí× Óê¾°tñnæ¹×d±¡Ó0[º±­—~k}ŸÌÿáL?èõ÷JNëÀ÷·…I$’HÿÔõT’I%)p}xùÜÙ‰ôüibú•Þ.ë ‡üñk8pťÕvSÿð<$“˜Ç‹ü_SKªõzze,¶ÊÝsî°¶šší“é±¶X÷ÜYw§±¿ðVo±pù–¾û­È,âëcÐt.sŸíßúG¶¿ß]7ÖæÙø®srÝîð®Æ¿üí\½¯ —H†–:L4~îóìÚßk?‘Ý©&=¾V>3‰snºªê,±Œ²ü¯Qjä}dÏXóí[znG#ŸB±g§[^6+}<|{Úªc2}L_Kù×äþýo«Í6uþ "¼Šîxq ¼w}ª÷½ö­mTQeÏþ¢­kU¿*¬ìVVE™8¹/hÈ5ïy§Ð¡ÔÞæeé·ôÔã¿ÏSÕ³!²ì:«*vÛ°ºmõbYqoíL»u·=Þ¯èÝlQ‚ÿ£ê:ª1ï³Ôgér×WÕó~¦ôŽ£† Ù]ëŸ[,>ëqë±¾ÜQ¶×cé}TÕô2ªªý/¤¸ªú›ro³3¬¶î«•´6ŸZ÷í‘»L—Ö¬©»¿GV6F7üjê>¦SÖrú­aÖ=¬Å¦ÆS[@ýa•µûºN+¶Õe>ÿÖ=Ñãÿ9ê}²ÔBˆkõŒÚz_\«ë3›Ôžsð3+±õÜÒÿé¸ÌþÕÅÉu´äãÛFEO­ÿèm]Nêݘuætë(èg*Í®êâ±Íu¸WºÏZÞ•eŸNª½O±z^üL¿ç«¢–U]4toVºw@ê/]Ž×_‰™·Üìw;k~ÝKÁ¿Ó«©âÿÝéûúgYè4YŸ…éu^ƒ–ÐÛo¬qm¬c+ΣÛ~Õnoó¾†F%ÿÑïõ´SÑÛõ[?"Ó‹™‰Ôl±îýyù °¹­mEž­·zîo½öÿG²ÝŸ¢ÿH‡WIé9¹ÕtÜŽ½ëu›Xöc ~†·~…–åCØö5Ô~ ‡ãÿ£ÿGêbu¬îŸÖ:N%˜¸ÍÃn-yý¥Ö†¿}9Þ¯­kŸu޾·dúޱÿàÞƒõK÷õzÕçÐéÝÆeæä‘íý ›mXõêÆ¿+-û)®¶»~÷¦ÐµÚÓcªÑ¹ÕçW[i¯:¹4°mmy-wÙzž3汬Íõ/Ù»ôx¹¸‹GêÕ5XìÕºÖš›P óßëI|³ú;ôšÌÀËwQn~.C™S²ŸoRÄÜ@`ËÆÜVZ÷mgÛ1~/þÆÀz龬㚰÷=\—Ĉ;kmt Ít9›lmÞÄÞP컋Óâêá†ÕÔºi'mþ›O$Of¿{W¡/6mngF-[šÆàM¸ô»ÿe«ÒQ—EKhùý)5$’H­ÿÕõT’I%)yÿÖ=>·ƒûØÔýšÊaÿ£bôç¿Z ÿuÆ›© –w»þ‰&;¸_[ÜÒ1ÞîH?3]ä cœ`Ï–«±úâÑû…ÃÚ̪I6ÞÕÄú¤=€6w„ŽèÏÕÚýn©f-c~VGOΣ²7YŽöÓ[\ã·}Ÿ¤c=Ë;§ô¬v>:3ÍÑí·3û)§{þÓéà±»œ»=–QC?Kú?çúG@ÊêæÛmX¸Ø…¯ËÎÉvʪŸÞ³é:Û?ÀÖÏü_úÅõ“ ôN—Ôrº§¯füìü’ý¦¶íuXX,É}—Wê·Õ¿÷ý:ÿMr!Y_V×1¿lý^®âçS™Ž@£~Ml©Ù9}4Y²·âdÙw®ÌWý–úÀ〦ïÕª½v¼ün±Ôq瑩”èe¶mu®_F%oµïn.'¬ïÓ[g§G§ü»‘×¾°ât,|lzñðëuŸ¯¾©6Úâë쪻òŸ?hôÙµŒ®Wô?Î"ç}TúÇ‘ún¿Õ0è´9ÍiêY³½lkFEëÝüÝ.ö$FÅ@î4v:nNe§¨YŸÒz­†¼›±-õ™ö—¸»Ó/ ýýo~+öXʯg£ôòÿI–z‡ÕΣe½7=ôÚïkŸPhÖ¦äTíÔdûþ©ðhÁè¸Kö¶'Qv~8Çn&'¨æ‹}FYV}Ö>¶Uúƒ}K)óž§èÑëÊ£«â³:}V5ÒaáàG­NØsj·w«ïßU9¶?ó‚TAý¨ºgÖ lŒK7¥ô«ísØßYØ¡Ž{‹Ç¨ë)¦ÆãØÿDä} ŸTòºÆOWoØ3\*ǨXêñ¨kqè¡Õk¾ŒjÿFëœý%ÈÈÙúÿœYlôpóêeU½§xîÀ~Cú¾å­Ñ©¨}f7\H¯˲·ÕÁÌ%Øî©Žk¿Z³ß{šM$š4¸,´1®Ëé]J«1í_‹qÙd×´¢XïJÇcf5ߤ¯ü&5»,^‘ˆÜ_Ùø·bîf.EBú˜òlubÂël¢Ûcßök³Ô³ü¤¼ß=¶3!®{÷XÏQ¶8Kšë2s¿•kŸk—£àµÕ`׃h eWí~°+©•]ê4µŽu{ý®w­ÿmý£ª%£>›@ËúÁÊn~kÁÒ6î³_Ïuþ­¿ú ½rS(ªÜ¼ŒÛœgÑu ¼m ¬¼Á×úONÊÿîVGúUפ“ÓÁI$’HÿÖõT’I%)pu¦Ï®Qù¸Øt*Ër­×û8Õ¹wë€Í"ï®á\Ñ[÷}!NN­s¿9êge,úl¿íi˜‹¿îÏþl%'ë}uþÅ·t—±Ô¹†æÖæÏÐÝú_¢¸G3c¹ÇQÉwÿX‹méÙõ‡7x¤½xs½zï–ÇÙìfÊ·×ÿª×ÿç €ö’@Ó°>Ãÿ’D­Ww¢ãaõ®ŸgÕ{s>ÓQ«&‹Mn°XE6cÙ@Û±¬±Žý%~½•oY_WzE9=uíË®ÏÙý0Û•ÔclbÍ–U};†Ë¯{kÆu{¾Š.sÇ ¤»x$:G¸;svísßfÕ×áõ̾¦Ü¾…em¿? 
ìõu"Ö×uÙ 7ì•õ †–¶Êlh·›^ßQž¯¿¬•ê¢ó9k3/'')åù™Ue›7Öº‘”ÏN¶ïÞßnUU׳Ù]ÿÅ®“ê×Ö^«×>°ÕV^Tu̺‡±…•ŸIö²8 ìe/g±Ž}µÿ„þ‘úuœÏª—SÑ볨cdcgÙ•{«­ÂÜlLksnô¨°µ›²¯gµïÿ¸ß ÿ„ÞÅèù8_oúÅsM}:‹³32®wól­í¸UewÑäý©·Ðìzéý?«öŒ{ªý_ÔEÞJ‡M,yqS®èwmÞäÆïZÿD—ÍÛY² yh7½¿IŸÍÿ8ˆÊ´‹ö9 0´7pÝîuô¶îÙÿ[@¯–8Ömáõ4Zšô_pú\×ÑÂÁ±C©> Ó;‡=¢Ë+º¶fÖXÊÛëYkHØÝ‡ÓÚÿm™EV×úë§Ðô-EéöT1º“ˆ{®È¤Sú ÚØûëe®;qÙú+«éúûì©e¿k,‡¸¿H7q'—{–Îéµ`áQM¬Ä9ùY¡}ZàiÞÆþ—¿¥éWê9ÿž?%‡óM{™ÔzžÊÜ«è×½šä·Ö{[IödXßøÏWbï+®Ê {1™\†0{v¶-s«i{«úôòú¯ø}Oäru .¤}£ «æ¼¦¸€6öY{êz/¶–=˜ïÿ·2úUÚµ¬c¿D4÷™cciqÚçä:Ç·Ô}¶z?õÏðhêÙî†×QÓ¯¡ûKz•RöËŸF5ƒw·ôwÑ‘ê/H\Â9•ÝFCo×v=³¸×dNïWOY•äUêzŸð‹¯è]IýK§2ûš+ÉauYUŽmdÕnߥìÞßg¹.§Åv†1®ƒ†_ÞÆ×ú [ô÷VïÒ·ÕB])ën ,Ë®Ýíc™´8›K¶ˆúm5Ÿ§þÜ•ù@4›,{[`s@;žx‹+V~•þ÷o¾ŸÑW½z/Kúµö:™c«¶Ü–;Ôtíc‰ÜÝŒô÷3óö~üÖ‘Ôòýì±øøô5Þ­Ù-.l¹ÍÚæYg§é»oç¾ßKþ .ñkÑåð:¶M¢Î iÎÂÌu»8Üg'`:ª]X·%î¯Ý^ÌÓT˽z?Ò"ýjúÅ‘—–ÌZk1Ÿa΢·<€E¥¸»*oÐô= 2XÛvYëå¿ô¿·Ó1:3Ø>Ù_QͶ³Møoxsôô±è~U_§»óþËU÷€§#ùÊÔnú·Ò2ìÎÛ•Ó«sŸé³ØÑ`ÖúooÚqÿáú]á}Jši¯Óz³IáëWå«É`aõ¡xÄÅÇkÞw"ºš ×sí{ZÆ;óö9þ‰h;ê–SCÝg¡[vûaߺ}¾ÈǯÐÚÇ~‰¬ÿºÜ[:~=,ª¼œV4*®Ú˜öúM±ÏþR;EŽý«yŸicµ<ýRˆíS‰£ï ÿ¤òõýX¢Ûœl±ÖV÷¶Y³gÑ ¹öÿ;¶½Ï¦Êö_üçø5£‰õs¤ÐÇãP\%×4<¨Þ÷Üoc?{ß讋Îúu;o%ïèu’÷¢¿/§Uú+2±j-à:êšGö ;Aà° Kå¹yzœ¬~›\zLiÜÈ¥ŒˆŸIßÍÓ±ŸCù×ûÿàÕ£†Ëk‚ÆCY{†›ÝéúnfË75ßá?óâ¶zK#ÝŸŠí5ý=Fε›±l‡×uV´Çж³ vÜÇ9 AÙ'À¹D1MjñšÃé1¡²éporNç;ûn~ôÿVrYWZÈk]X×1Ú ^üÞ­f+÷Ï»ôu\Ïûo÷ý—Ô1«e§!ž¥P×\ȱ”—Œ}›7ú·µÿÑzu~¦n]þ^¥êZ›¢tï²}6ì·"ú^k1¾ª¨cac\àçVüšêe·æz?£ûv^Oú4,G‡í]\0×BeõDeÿ²I$CÿÐõT’I%5z˜³ìouuºç1Ì¢ÈÜð×µï¯ÞZß{Z¸Î›ƒÖº{ 2±,ϦUöšÚæ>Çc›},Œ–^=jr]‡c1¿›Ë¢ëh¯õú«]êI$/úˆïãÿ¹y;3 $áe=ð zÝý§»ÛS[ùþŠ£2:ÞX5âPÌV%ÄØò<=.žË+Ùü‹¬­vé#hCóÿ¤òÝ©XA¿'6ßäÖï±Ö>×c²Ûÿ[Ë¥\«ê¶=v‹™‰G¬5ä~žÙÿÃM˽¿öòèÒJÔurdåZæ ›ZêÚI\áàê`ýÿ”«e}TĽζöUŸk¶—¿"ªÅŽÙ»géieMß´ìúÿ]o¤‚¶ºê(ù<‰èôÅU;¨âm1¶œ«ÜÈ›è滨âìþ£6!;ê¾ñ]”÷vu˜Ø7GÇÕé;¿é.Í$Ŧ2”~YÿwG‚wÕ©ýI <îéEß犪gþ­Qõ"¦lfNkc½X¸XãüÖtÛ?ê×f’UÁ'&I 3‘&ÞHtLÁÎoRw“›ˆáÿ‚t´ ~«›]¹Öä8ø¿ §¼ÿžîšÕÚ$‘î-”ãòÈÇÈÓÊbô1k/d_‘Hshºø&¦¼=˜xôW‹ƒƒ½­÷¿«ßþ\«'Ö«ô/=†K`Òïú!o¤­"Í’¤’I¿ÿÙÿáCYhttp://ns.adobe.com/xap/1.0/ 1 2 1800000/10000 1800000/10000 2 NIKON CORPORATION NIKON D50 256,257,258,259,262,274,277,284,530,531,282,283,296,301,318,319,529,532,306,270,271,272,305,315,33432;7F2E6A5D5BEFC3067CAFD64E23E5F118 2008-03-24T12:01:10+01:00 Adobe Photoshop CS2 Windows 2008-03-24T12:01:10+01:00 2008-03-24T12:01:10+01:00 0221 0100 1 1 2 3 0 4/1 400 400 2008-03-05T13:23:21+01:00 2008-03-05T13:23:21+01:00 10/1250 56/10 0 0/6 42/10 5 0 True 3 3 False False 310/10 2 3 1 0 0 0 1/1 46 0 0 0 0 0 0 36864,40960,40961,37121,37122,40962,40963,37510,40964,36867,36868,33434,33437,34850,34852,34855,34856,37377,37378,37379,37380,37381,37382,37383,37384,37385,37386,37396,41483,41484,41486,41487,41488,41492,41493,41495,41728,41729,41730,41985,41986,41987,41988,41989,41990,41991,41992,41993,41994,41995,41996,42016,0,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,23,24,25,26,27,28,30;01E21FAC5F15A1DAF80AC79893D4F821 uuid:43F4408690F9DC1192FB80ADC6E99C63 uuid:44F4408690F9DC1192FB80ADC6E99C63 uuid:42F4408690F9DC1192FB80ADC6E99C63 uuid:42F4408690F9DC1192FB80ADC6E99C63 image/jpeg 3 sRGB IEC61966-2.1 ÿÛC      ÿÛC  ÿÀÿÄ  ÿÄP ! 
1A"Q2aq #BR‘3¡±$br‚Á%45Ccs’¢ÑDSt²&6dƒ³ÂáÿÄ ÿÄV !"1AQ2aq#BR‘¡±ð3bÁÑ$r’á4C²ÂÒñ%Scs‚ƒ¢Ó&5D“T6UVd£³ÃÿÚ ?ßÍ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"mV÷~Ôµê¯A©Ü”*tÈà)Ö$Ím—2 ÷"ç;ïeêt]–ê›HÉRj Ï–ˆœ”úŒzÄ¥D}©1ßH[n¶°¤-'¸ ŽÄh‹§DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ~)a>¤ pT®Šm¥®\è‘ßÞS®¥!?™'DLz÷V{{ACœ®zl§ZÏ6"8$<œz‚„eYü14«zÛmñû&‰rV’¯ßDa(R(WôIÒë\IÙÓÞúIßUI.Ò>Êf’ÃI.‡°98ì}fËe$ë"4DhˆÑ¢#DFˆ"4DhˆÑ¢*×uS.T2ž­BKŽ)M$•ùIäwÀk,ËRQC–ý5¶Ê:š{`æ}û³­Ôx“ç¦n¬åt©QT)í;7o_W'£°Œ»CQVTó( k¹RÐ/t÷ùU£ºÜ1-·®(e5F›-‰°¦6—˜y¥…!Ä(d(õX[®ýyH”ÜTòqh@ú“D{“×.Ýí¼¥Ä5‘Z¨6µ6¸´–ÌÇXý×<¼†ÿß#Z9ŠÎHTßkfЮxÊ>¹¤¼àþ¨Yg'àŽ,ÜR¼n¾6ÖF9Ô«ÿhsR?¯•ñÖØIa)AëgkçÿwS£ÿ÷Aq¿ÿ N˜I.ÉJ'V{e9iKWýž·Ù(v9¨ýç“ý5®%¶ì¡ß4{‘°¸8SFAiä«·òÓÕ)ùèRsÍ8>ùÙÚ;-~èˆÑ¢/æ³¥-Ç[BêJ€DMË“y­;E¯2§qQá ô¶ÐùäöþzÎK¦ug­+š°#ÔåUÁìM†ìÄgñ-¤ùžÚÛVëMc&üž½è̺¯.عd5œ%Mü:ùãˆÇøëmS¦±—‡ÿ:?2 Ÿv£èUðŠú^'ZêÝk­eá+Äž3äÚµåÿÔ ¤‚Žš·[kr9â àQ ص)~ø©Æhù-A_᦭ÖA_)ñ’¯[ {^"¿ÉGM[¬ëy½â1Ìy6zq’žŽÿ‘NuŒ˜ÆëÅ^ µ¥(ð°X)æ¾ÒÀ£XÂKÅ|¹âYSiÅ’ÃJ> Ö[^?¢5œ+lK†w_WK©Ä[fŠÁ'‘5K þHõ«¦$;ª]Í»¤)˜ScEKÿu¸Tâëˆ'øT¬ê5¸»¦$‡~¹xSèÿ~^ÏÛôâ9ë·tvH÷ËR=½Ž¡’¢~ÐÙ¼VusPÂúéýW" »¾»%*±ÚõÂÔ—³Ÿ@µ’OõÔ¤©u˜òçg²›Í¥öIðh¦M%5*S”ªí=H.5"—1¹,8ÈùÐHH?ަ†x§P›;v(œI‹ ”>æä9tHuõ}¹ !N6O˜”ƈØRJòAïtÚm’³®yT\¸²»Þ/J•´??áÄÙ2VòÒÈ“Ž#óÀïª&"Ï„w+°âô¬&°¥Fˆ"4DhˆÑ¢#DFˆ"4DhˆÑ;ñ §¸ÎëY³8ÂàKh‘üYAþºeC#æ«ô¬©?SœÌúgR[ei{¤ ò¢Ê«L€Ì¸ÎÔ „Òà/ÆJÇÊ\O¨#®F.Xx¶õ'Tq%=½»®-µËvÍ~­n¶\RÖÔE¤´TNJ‹k$ÿ-n̳¿ª¦»?¬}É£Á[rjôÚÒ–œ¥ÃKN##¾)8üF· «Hé—^ÍÑJÍÓ^©VÙYÊâ-Ï&#¸ýŠ0“@H'Q ˜ Ù^ýåpb›MY퀖cãüp5»?ª…‡¬IÀÞßÜjH+¦Éh+¶]Xl~}ÎŽbÝgfñZlú¨ookŽ+ŠŽ¥}>9±ßòα®‹ÛozÓ r^®m]Ðç"ŠJÝþ\”/üޤcê›{Ö©:¥·5ø¨&MhHÿº öü5¾.ÔÙMZµ‡ Å6†Ñ*ì¢ôAÛù‘­/ÁJ%µ½|Ñi,Ûj[´µÍ¥¸¡…˜rÜd,L„å¨Jöl·ÆIÕFÞ ÒŠ”& ã]Žd‡J^ðÂÁÖº—G?ºœTΧ÷F*‡ ÎŽ=ÊdÒ›=¿4„5$Þ²Ô¦õRã½bnC”ÿ‡n}¾Ì‚¡™Fœ!>øF@γªÙÞ³¬nI¹\Þ«òäp*méXo¨€ ¯øëlÉÓ~SU¤yófÏš³¥È’·=ûç$ëm‰e|UM½µö\‹ªäªQ,«R)ê´ð–ÄžÈeæuÃ씃­jja¥ƒÎ*ÏV?ìâ³GIW¤+Fè˜ ¦¥÷ðí'Ü"ÜÝ06·­­¸Ýšº·6}HíÎÖÚµ´ÐÿXîG3öƒªH(Pm8)+ @î==uÍÑZv*à’q³zÄþû—¬é¿@tE'¥£Ò’ M0c!¬y»awâù>|Sf©âÁÑý"BRÿRVó…Gº£Û³d$~|F­þ”…ú ¸ëÇù±¿/{$õx¿tn§?å)JAÏblª²‡ó cYý'°ºL}žõ쟮ŽVGSCùÚ4ŸêFµý-±'îÿ7>ÏzýW‹?G{u'JR±Û¼áß¿¹N±úVbOÝþ l}žõçÿÅ‹£ç>tõ!D Ï~vÜÑþI[~“ƒØ?Ü[y´žö^¼]:<†•)Τ¨à'?rϪ>¯è‘ßAÒqÎý×dócn^öMÚÏŽ?EVêU½wut·ÿgG°¥²§?dŽ?×RyÑ?VA„›¬L©þ7;¹—gzgêz¥í)q+¿E ·””ÿ´YM&[©°~Ó»[¿,?‚x£H{¾¸§ 7;Ä xé«›it¿²Bx$"£Ö>ЖÈ_`J~q3ôÒZ)@Z©Û²ß?Hß%žÃ‹³“òçrî‹ y.ª”Ênøx›ÚvjhÿI¡íƒi„èÈÉl)ŠUŒãºO¾uʪÒ}…µ•/uÝŸ½›ÿÒ½>†è¯I´™aÑú2WgÜï~â&·Ô+VéÃú—R&©tõ!Ô¥Û1ÃûUc,ž$ÿÚ– Hú¨êœ-уhôuöqïfÿ ÷ÑùéÕGÛ P3oyd¾þ7&÷¥ê¾ÛtSO¤CEÍÑŽìÛtt!)‘S59¬¼ŽçæQSˆIÈÁì¡«’tÚ°¤“Els³ÿ…¾~*zoäó§*p .™¤’OaŸ{]þ cm<:aP!V÷gï~k±nJZ~2§´× —Vi>±”ۇ˔’¾)y.$žÁÔ«o£ô¶‚ÓÒ4ax*8_.ÜŸ•߃ûCeá:UЮ‘ôIÚ>@Ìöb¸Ë¸›+ö>ìV3 N²(> Ôú&ØD´·bÀx&þ²ê/¹%!Â|¥É‰Ô°\I h§ÌiyI­G’·"È Æt±ò¶ñ¥ã'Ô ¤Žúž,*¼¢Nªþà·rÓíµ9gÛîû§¹"‘&a†‡ÙIÊ–Û™8H*Á?»¨ê£êl.|‹u¿5wD6’°cÓ;ï[ávå|”$ÕÉ[ÞkÖø»¶¿dªµzuF˜ªrê]uk¤‹kDÄ€òã¯)I <”Ë ×è΋t’»kFèÙ Ÿý“³x³·ÅG×DÝ,1`G¸o¼:‹¾•PTý˜ãèt:‘È’Ëèqa8ïËÓæÕséFYµš‚/uþ ËÑSy'éÔõn4dkæBÃoÚÄÃ~Í阾¼>Ÿp•õÕtžî),‘ùÿÑ3¨ÿ=/ýŸ¾Ë¬þB| öíÿËýÄ£Hè—¢w ¬®¤( tÉmCùF«ŸIz0/r§vî üÕ)üŒôþµ¿t±?ÿèžtŽŒöþlOÍ <Ÿî›ªJš–ÓìÚÃWéôÿE&!ðvÿ­¾k‹/“žžBW-+ûå‰H¶Ãu_NC?ÙωËnû;ññ/‰W˜‰ ïõ?Ž­Å_ÑÂ-\®/Úû½ò~ Î×t{¤®þy£ewˆÅ½øE¾)Ù2»â1¶ĪÖÂtÿ¿td§ŠåYÕVãKÿhú ȺÁ×VcqRÖ ÿ›°7ÅpÆX±jägåóMН‹d½ž`ÿlÝu1¶ŠlñvU9•L§ƒõBäFi*Oû+:Üit£R271w–&ozœ£€‹›ø³=½Ïø/:gét„´:¯¼4i 8TiVÛKZД¬Žß–«ÉQUÌ”¦Þ!ø¨üݽ¶÷:ó—úB1"¤Ý>ƒku |Õ‚Ìj].!ñì@pù˜?‚ui¡Ò.YR“wÿu0aÚ6N¸ñê «zãԇ覩@O•]›±R“—NB»y®4C(t3Å¥­_Dh4uýiÈ"kï{»ûŸ?p—5±œ 8‰þL’·Ó¯jçI’ÕDŸ}Ú»ï½-?hµJ¥6 ž„|–[AK’=R—Çæî¾]x½7ÒºjFÔèÒy$¾fVvnÁËÇ6ñ»7Ü<šù¯é@=~–2¥¡Í› ;I#ö93Ø|ü2Í©¾ÿoëÔõÉM»wé]ι‹q˜QB|˜4Œí³a(N0â ãóúù½utõ‡çUäoÏêÌÝfì_°úÐý Ñh%Ñ:™¢pfw=ç'iûøåÂÌ®H"̵¼ã½m”íã·+·Ã¡v”FÔµÌy¹¤  ‚@ýᯥôN¢z-UTðëIÎØm~÷.Kòï–í}"ò¦:jF™†˜IÌßg~ìì×{åšãEɵ HMÂÕ‰ |óm§•Û_‚?×:»I:Pgèô{7üùÆË‹‘MÒºOJÞ#ÿy’­V­@:WáufFd£ä&ˆžeYô!qQ}uƒ¤}0¶Í/öYT«òSÐØÇÿÄôîýß”¤—nêý2·SK? 
Ûje2+A´¹.†ÆT}ÈãI#·o˜¤ý0 –‚Ýöþ ˆù0èS6*ž”AýÖgùÊÏðHnAyääxWíî?ï-¶Æ:„ºAÓ}¨áþ%·þôJÃÜ?÷W£9ث»nsëÚÛlwþQ5ŸÓ=.ÿíÃþñ-ÛÉ¿“ßÿU¹¿î$ІÔÝWMXTh¾ûQHn&?fý5¦UË¿†Â‡¿dêí?HºX ‡R7îñ.‘òwÐR6/ç8³r~eóRmõII¹_©Z'ôóµ ºÚCrfÓ  @R}V“æ…ØþZ­&’é„Ñᘂ6nÛ_Üd¥£¾H¨]U§&™ßÕ‰í᪚zÈ·:¾ÜT¼››©Ûo">Ÿ,À³©É¨8Êb?fÐRôßÏ\ÓÐúN¡™ë«÷û ÿ1À¦ =äÇFÜt^‡ªª7õ¦'Œ_²ÎX_÷<m^{syLj^ãn^òKEFqb+ªó‡ œHÏ  Þ.ŠhÆ=dØå¼öùgñW¿ñ»JÒ¯£ú&’¹³b,·ul/îR ‡ÐÞÁí”PÝf-‰‹'ûêûîU8TåŒk© ˆpÇL>-‰þ7^_HySéÕizm*Q³ð„?ˆ³:–(·,‹6‡A‡D¶â qLzE1˜Í€>…) ×Iêeõ^ÍÙ’ðu@u‡¬ÒÏÌä"ùº÷7íi\íNL¦–8­¹8}·ö(^RAQ­iXºïïUFÒ¶ÔagæÙ;x²®RôH‹ŽPÝÍŽÕƒ½¶H5C €Ä;™¦¾w–Çʉ å qsîs‘ÁÓš-*${-˜å‰Ûƒöò~ÎKî>O<¬M£Ëù³Ò×z­S`Å&g ¿Vå½ÂöÏxu™ò³×^¯÷Î5‡qì·‰ÙRSJœ*ɲ·ºƒ ±Â`q mÇ ÿµk) _~Jˆ£ó$ê]_&šÑNòeUJìϽ·e»ƒ]ínDíê/)ÓÞ†OÑ-2¥"Ç®*LJM=â܉³ä¼^aü¤ä°’â9{|§^FžÛ.÷‰ÝÙ»_6.æºûUF”¥¬ Ó`ó˜ð9]„E°¸Yýb¶]ì‘w;¨eØw|{íÁÞÝ8о©ÔöK”;¦–•-r"ky2só/Ut––ós ­Õ×vêò½É\èÇCHÓû]'Øi`›xàãª|ñx)ºÆêÚÜÍݺ6ú‹oîMl:›à©ÓƒTyŒÈB\) ·›óB»ŸT}Ó_œ°<Øb6ÉïÏ’øä”rÓT==UžHOi½¬/Çî•”Z…Dz?’³ªé Rª ¼_ä_‚¹SÐn“i0r¡Ôð;ñ×°[À~IëTñ½ÙøpÒÄ»Ïtnå¶KÍÒioþ$=ØË]_çý†0•üE¿æÃù8ôò¬ˆµ4Ñ_ƒ™•¿u5/oÛ~‘ ÑÛ{Þ´ú¾V^¹*©ƒ}>Xÿ*‡àNªUyDwþ‰¿ÃrôÚ'ù,i¹$ÿKiX¢PLj½ç›*·Ô¯‰–ùõdá¶ê•÷èÔj’ÃLZv›•H VCJá—œú)JøN¼…vœÒZHµ&N÷õG&î³oñºû¯D<ˆô7£xt†¯_,m}uCâa¶òf-‘ïfk{I‹¾½%^ý#[}Fò¤B¤&öeçéÔæç*7”¤‚$ vYä;gú`G¦t VŠ9ª-s¾\­ÍzÞ‡ùAÐ}*­¬¡Ðò¼JìÄV°>+õ;ß[ÓR®ÛTš-"™ÅI~T‰¯$嵩}ÂQøC®3d-¯WµUH6cffn9q~õ¬~ytOmºD4¼$ÉQ”CY XS©Ç§®r5öކFC¡£í"Šþzÿ(9«ÊEn³<û™OUˆ·¹„Õ[¦:âIBäÚ–>¸í¯MŸ[ø÷™Sû îHó.JˆOPžþB‰ÿ=Få~Õ·™ÀÞ û’|‰’§gÍ}õŽý–ú•ëùi–Zšêƒ{— ˆŒ©DŸ˜Üãéíe˜Vø½UâÚŒ|Ê’=ÊN5‹2 u…z©ç»e甌g»Š#¾¶Â.°Ñ‡²Þåàä6 ¯!²¿\àgúê<"¬ƒ“ÎK¥(J¿?mÈ—¢[-«óõ[bºöòy÷ô?]0¨Ç%ëå…7˜vÈú}t‹É1ù7“Ø{}ôÐ’öê¥ vc´Ä9ÍKŠò\Âp}àtlM´£©„'ˆá'ÞΪÃÓÄJ•õ×N±ÙCưÕ}[éx´ÔæR—|ÄßS‹½˜ô×G‹Rt¹â£ª÷sÝóÆëî1¨“¤K´?H¤³ÍG. ß޽¯ÏÑ‹x“óVçôh·•Í÷ð†Úªœµ8¹öÜYVëZù)ÄE”â[$žÿÝ”«Ô¢à%ð'·sçø¯Žžo‰h­­Q¢#DFˆ"4DhˆÑ¢#DFˆ"4EV7úTI]A9M¨- E¨Ce”<¿F*XAÏОÇVb-• Šê6ú´úS\f÷.öµl‰ÕÕM¦±R””Ê®~Ï”fsæ-$«a8ßZyÔLíöŸƒfü®ü”2ÂG³l·ö]EQöRÒØÍñbPªSo›E¥¿öu^º–¸c¼¢ñŽì‡‰B3Ïå*ôí®]NŠ’'’…š9í›äìûÙ÷|×½¦éf”™´nž³ÒÉfm\fM“æý¶U^ù¹,+Í»–ýà¹÷F®Ì–)[I>–ëq­IϺ•—¥<D¦š *l¨œ¥C8#ËI2‰RÓÓÈS»;a{`ŽûÉ¿ºï–y¯´è: =E$Þi»9×1)cf}‘ õdVÂ]·µÕ®Ù}ÈT[n—²•ZÅ}ûïoéʬӦÇq¨R’Œ¦*”V”•öãë߇^ªµb:<€±ÄÙ“¶É?ÝuòšPKW<½2¥ó “ÁœšÏ¼Ç…íùðO&Üä Gaôöüõux«2èe\“õ÷Ño…{$ü©ÿŸü5°¨Ý—M.Ÿ"µ3áa²ì¹d¶Ê9”©8ž¥ÈúªµA„A¬¬Ý©“½ÝRíwL™bö¼â±V (Qi N¨Ÿ RŸ/>ÅX×Ht“FÑe!ë Ùü÷2õýòyÒŽ“mhZ7Õÿ­=€ð¿ZÝ—UOwÙK#(lû÷Jõâkº{Zmj6h››m¼²øx¯ÐýþKz=½'J+Jwö!ôqþöò÷Š©ûÍÔ¶âu-ÙÆâ]UÕJûÑ$ËTx}¿u,$†€ü’5ã*«%©“I¹>û–šýÑžôs@ƒG¡t|q[Öfb/}¯y:a³J‡OIÌ6R¤ýîY_åëªÚÅêNS.«û²\r$¡)+Km°”OŸmjïu¸{N§mµðÿÝm×骡º´6h/ZtøÓ&¾ÌŠ¢XšˆñP¥ºèmX$ ã¾½-/Ej&Ñï^&,Íw³äûžÒ•TÞÉY‰E†ÒT¤¼ëN;ÅOqSx>@âƒ÷F½$Ú¨qhh­öòdîÙõYó{¾\¯“³]|£¥¥ô¥K”ý*ãö4uÔÆïgvÚìûÎø›q3¯¯?Q~bé¢T!Þ3¬jŠ*&¹MQ¤GR1-ÖˆÏ˪*Ö:K×h?8ˆµš·¾-×àOïàªùÒ§ÑŸ(Í£jc*hê…Á¢>°½ý2ÃfYt¹òå@qÈî­.º€ ó ëãl×¥ýÀË„¸:ØÏK²•xt·J·k(¨þ­6ä:ÃlžRå)A\;2•z¯·tN¢´@GÝã¾&å~=Ëù±åËGÖÒùB¯šº7ª,ñªbÙdüÛ‹)zmBUaï:mBt÷T+ùqX‡rOmzanªùy‰b^.CåÝ)ÈÇR …r8ÏÌIϯóÖÊ5ä¦Ë|±è¤à…h¶ÊürÚwæ÷j»)°!<ç–=Rîp{l(¼¥CZT¢„•6ŸQùë\7R³Ùz·OZ›Î3Ÿ¨ÿ!¢Ç¬»ãP^RBˆÂAì©?–³k(×{ë¼p†]'¶T qüµœ+b5Ðͯ#ˆRƒm„ö!j ½ÎtÙdÄ‘î+žÕ°Ûj]ËxÛÜG—ÀJ¨ÉKlî2HÔG0â"É–ì$ýTļzðé¿o)ræÎÞ›b\(;& ‰}JÐØÁççRGmAúFêÝüŸªšõÔÖÞøŒm¦ë˜ÑköEÙ·µª4† %×"E& äœzT‡’}Šøk¤RÒº?JR“>xmÆÙíxbøv¯¯ônªŽ¯ÉÆœèÔ—„ãŸõØ/íl¿½“§ôL(R)~Ìò‘^½k,2²¿ƒ„TÓho‘î¬-·Os˾½†Ç<¦6³“îÝÃrù 0Ž.KOõ”Fˆ"4DhˆÑ¢#DFˆ"4DhˆÑAê1å=Ô¤ÆKï2†8F=Ãà)e'>Ä“«£5GúÎðÞ‘]êúáÞV%ËÔ «žg)UÛžäz–vŽn— D–Ò¥-AD«'–¥©b™õ,ÍÈ‹hŸ±¸5¸qQÉ0½†c³pnæ¤ýä«G¹~ÐUñuÒ mL8Û†±R€©s%Çm!´GKhC(@ͩwôx¸µ®åؽ.‡¬£¥ª&¬²Âöiù¨ïb:¯ÕNÏÐwNÞºØû>y4ÅI¿|˜¿­8&#N“ê ?†¬ÕÃV Lzœï‡~&ö]¾»‹HhhÎxt„O;H/fc¶®Wþ²Û¼Å»»©oíD«nx]´ûI›Ê@j׉RC5—V ”¢:ÏÊ¢JÐ~ü†¶œìM_=Ök·ðUèÂÚcfÍÒ‚,ê³õ'¡±I©I—jC̳n-µ%XP! 
žÄcYhLº£¹[:¨£S; vºPgmî6ã¸óôið™láNMGÂ!ýTçèð“"Ë¿%é*G|1ÊÎü™ñ?Âî¢íêº×Ø'„oÜ»—u:’Y£Û« §´@ìdË)û7q+´ÔtÅ«¥æ“¾ÀÝïÇä½ÿDüžé.â›Îc£§mç/]ÿ݃äþ6Tר¾¯º•ߨïÑ….}iÊsËEØm0’à>‰yðCŽèJŽºuàô­n¯Ù¨»²Ö÷_çuús¡žN<˜ô|šªJª©fûYŸˆ°·e›.j#¡mè Óœ’­¼Ž´‡ …E[žrT9à…ë”ê¡t;LàÄðeÞßšú(yaè,’y¸éˆ®Ùagµ»?5(í‡%:%°Ý˽;–ÎÔP×€ÜÔ‰2ç¬öÂTé@e³ßÙj?êuiºÄê‹ÒÞàSÄúæÛޑ葟C³Ú•tçža*i@u)P%>„«¡ÑÍ$’H^ÍÁòŽÅ“ÊFMÞ}% Èía±±3îw³óá’Ñ}òº îÏL6nÖlêZ£5¹4vàF‘P_ú%¥Ma!"Oy!e/¸‚¬®ÿ×Ñëê¼îŽ!pi,ÏÊ6âûï~kñWF4qÑt¾·¤Ý.¨|ÄÞGÁÖœÉïÂ.ãquo–IËЩ_ØÍ”­meî»Zdë& dR¤153©µÂÉY÷HJþéÛµÑÈ'ÑÑIC3³á{‹¶ìóuC˙ѽ"ÒôÝ,ÐXŧ HÎÎ »rlóÏŠnn”•[µº·E[©›Ú"Òý>׆ní͵Å.yj–âÊ#¬ ¼áó ~E!^º¥Sk¯>*¹[‰lÂoVüí|]Ž»°c¥9:7 z™ò:™__]6ë´BØkäÏ´.;— K¨}ººn¸Ñw&û…¼ri`·löÖ"ãmý4R†äI!-KZ{©Õ ö®²×SÊLõ¦õϳa„wä×m®Â{åÁkˆÒ”ôå7EèÞ—YÖ®¬|U‡ÍÀ:Ñ·6»·ª«uô9J¦VjµŠöêm=‡£)émŠT‡®Z5¥¬© - 6¦ÐR’?iË^^^ŒS™¼ÓN ïžb+vp_¢ò媧††SVQ‹ NLÐFnÍ™3Èìïwû¾ôöèQöÁo™­Ñ÷æeŽ$'ɪý»e<š5yŽ@©•qZ’ ù°¥')=Á9¹¢´LTS5DsI¸êo|Ÿµyß)=*“hÑuÚfmàãR8â;dM–,»ÎÙ:¸/uÁ`=P”š4úmq†)GÃHR<ôç±m*”>ƒö…¦ rÅóàì¿4AtÈ @YûþnVþÿE¹)-ËõVœÓÊ¿‘kϸOÇW¢ª#X,¼Å~Œ:iJ”MÛÙ{¥¹W  6äzzJ“È,Kä0HÔã'Þ\ì²ã¸Qà¥*Sp* $,w?RF4KÕBŠÝg_·Í&†¤¢Kt§>Êd3KK®:Ú½’IÎ=†µy6v]–ñÄ.[DìÜ×Ýq™‚ÃïËi_ŒC.Çq°ù#¾ œ~'X×Û­’×9aŒî’ê]ZöûŠ5sU7Ÿú5E´“øT3¨¼G’èÇ¡j¤û0,ûSž#ÛmœSo.Iµ$e@»- `nÁEJþCP–—ÿdÝ÷]–è6’?]™“bäñ(¯<ÀU2ÖIŽèì¿5pŸ¡* FZNSáeÒƒÉ쯳$ÌÎÝ—Mú§V—ÕØÏŸpÛl¤çêàIþš_;Ž"++Ñt.Ž ™¥w~ædÇÜ-ÜÜ[våÐûhDHS똎(RN{ /ÛPÌ3¼E “dºT½ЇYæ8‰äv½­ÃƒûÔ×ÕÛ³»)ן§í•±ºÕo²¢ÜT[F®¨SÝÁä’RGÝá˺’ñ zI ÄHI†ãÆÖ¿mò_(pÀo 3¾vßl™P͆·¶OÅOmní“{bìNž·¡M‹Qnä¥ÂuOÔ+ŒÎŽdG¦42„1“%%´­\J®øå¨"®ªy0UØ€òÄÌÙ»vî¾Yö+jÆ-eÞù5»TÝÖžõS)4õO¸4öË4¤°àm-œ„9Ú4ª¢>bÓÿxçÍMq¨eÓ&#Õ¤ ¿hò¿½{í3è~ƒQRÉ”ºVR–O÷qmFÝÎì´ŸÁGfÁxqm=ªR”;K¶ã.JBq™ryÕÄ•ë¡N÷ˆOÝ|íúέ¾§XFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ"ë¡_ÎùÉæãnDÛ «ì:Úø¸Ã¨eE!CºT È#ZÔåN}Ë öƒÞ²ó¥mê»×ú>{esϺ«óknTšc“dÌ[ò‹ç`0§J‹c8ݵÅé4žm (Ó;çl¸ºúw’O¤tõLu€ÒZ¯œY³ïÍ[òÝuôõ·•öÛ K‘jÁŒüXÏ¡+e÷i“ÙXQλÚb±è)䨬ַ{¯™ôCCžœ¯£ÐäNÚã6wmøXßü–hm}VW_}N[TMä¾*ïÒîéÿ)Ŷ^mrq ²ÅµñA |›Ai0ó¹Ok{³Üÿ üï•èíÑ®‹ÔÉ¢4T0l‰33;qr}ïlß}Ýhó±ûM¶SÜ;â}ïX·6Á´¦“K«:* P–‚oÅA$‡¹cØëêÓÑèâ“I7³Z׿×zükAÒ 3§ph=GLÕ%v6 .ßvöÜËÍ6öïuëO‹^¬×+Û ´a&e9ˆ/“vÜèXä™2Ój–V¤ v V¨žŒªÒ@5Dµ6bùÛ™»åÛµû­“¯C.’èÏ@§:Á´¶–|‹~¯ ñk=Ë…†åÍÅî)=}ôÿ¾ÔÕÒ[Ý­ÜÜ*Õ°•Ø7dz„šzs…:”¥jOeOõq¨¢Ð½­-L7‘ÛïßÅ¿‚Þo(½8ЖÒRhšzHævµà1äØ¬Ïš§½eôå_èçqitè×Ín»iÝ2…SÊš“„™§ÒOÞŽ°Æ¼gH´)h¹Øc7xÉ®ÙÙû—è%ý6Ñý5ÑòM%CU3J;Ç?X^§smÝsh‰mê¦n8y¶$4õ ¨žØçÍ íî1ók˜sÒu%¬ç|Ÿ¾ÿ‚öŸ¢ô¤zE¤Ž*b¦äìí ü,ÿ5?Øþ"[G°û G\[½y^ôÆ|†)m\ª•O^>úT’Ak#å@üµê):WAENÞk<¿x®-Úß•™|?¤DºWÒ?.¸é)¨78#ô¤ÜŸïs$Ö±½CxŸÊ]ù{T)öfÜÒR\Mzæwì«v’ÑÀÌv•1XÆ‚ õV ý¥´±yî“<ÁËû£ËáÚ½é'~K©KFèyª_¬Á·)?ß?Wº÷äÉ“vo‡Gý)JM>Ô·®ž¯¯¸k•Q~BèÖlœd$ÉÜØHWñ zJ=Aâ†=cûG“xàíÜî¾CÒ-}.Ò¦AFãE ðÇ%»Iò¿u¹+PêRñ­[0)v¼únØPeÅL†©%-R€¤ƒÅn`)DdwÕÉ%¨”u8°· Ùeà\uu¤¯Z|ç'†åoÅÐ6•TäÞwEÒÝZ¼Ú—ÉC¯ºëi©G¶=õ[ô$òâÕ“ºô”Ý7ÑtØFJ1嬙{wöžè× S¶Çrᢧ*ZV#eQfr’RÚ€äpo}S}Z´î+пNú/8újw,²e!ÌÞKâ±T©[Võ÷sß7 Ôèr¡apÂO-CÔå@àmM%WV2/ªâèÝ!¡¥-uD!6çÚ¾ýÉ÷3a÷6ú·P.dÔKÅ”ºÄq™>Ë–µqÇ®¬þŽÃ ÏÜÛ™”AÒ I[ç¡häÚ.àn ßIzÝÚ-«zUÛvÙ©‘[-ü¦ÛO)}ãÅ$•¯×æÕ£,:¹ ïØÖeÃÒ*j<âŒK¾B»¿w&ìN‰Ø›“ŠëLý§ °.¹|¡_¼ }ÛZQÐENäB*Þ›é¥n”§ä{[{6ï¹sn­ªõ×¹kêĠ¶ÛÔ“OŽ•=TJÁ>R“챎äzë®G‡ ^ëÄÜ£¹ ;w;¤:.øZ7ªÔµãÚôF¢ÝÕÌÇã©óËýH<†F³©”Ȳ·gÅhuÒÈMHNÏÚ¤zõbÙ¶à×%×e¢-–"¦ÂP¯‰Žéû2Éíí¨˜®]\Ö {X¬Ì£Ý¯ë#k÷2¼Õ-Ê]VØ~[Éb ŠªbjÔ ”¤~RIº°tÓ€â!gnÎ «TŒ½Òþîouµ5&à!ºmvâEMš|úr\)1áÌäÙî3¤p”›B9Yfj­VÎ÷RåR“O‰q3(øU](ý§–S‘ƒß°öÔ,x•‚aïeئßßÖ­z" ^§y0·n{ÒË”ê|p²9%¬åNöV µÍ7wß>ë/©éjº½0ÔafìÖ³]ÞÍÇ’éþݵíõM¨ÔêU0–’÷—‚‚UîÉõõÖÚˆ˜6nê¼],ª¨œiá„YÊöÏŠ÷¤ìÝžÆãUióþ¿ðÌ¡úb¢­C›j#)p÷†{çZÚ-n7ZTé½$z:*ˆGTîîÒ_æ=‰ïK¢ÒéŠT:} W¹!iŽŽe ¤ò#¾¬¶[#’óU5RzJ‰í¿?ŽIO™M»7 ñi‹RuÃ_•,¢D¬7C±R•Wc¼å…®ÿêN*ŠmFETÑBÍr-äOɹ§eÉG®Pöf§hPÅ@2úE5‰- ¨É=ŠðsX<F×ìâ¼ÜsQTéa’ª¬õoëÛ þÏr^Úy´­õé"‘N»©ÐêtkÖÞ—F«S&>aÆqI*Ci[‹i Ï}i£æ ¤“{^÷o›*:jŒiô¥E<%“;Yø½ÕHð®éi®—º»€ÕyªÂNÇÛ“ê1"ÝmDˆ¡*'á2£åº¬ŽúÈ‹GP•³¹äùY¸ªÕ4rÔÁ«‡úÃYíË+&T×nϳÊMoª½øT™*H!N²‡Ìs€=@{еÀè©»hÚÍ ]iKøüÙ×Õü¾G«ém&ƒìè`Œ»Åžÿº~åýôÛICÛ6!ÆoÊf†*Qî€Ø €’ué\0aL¾:}¥!k 
dhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆR.½B­.¡OΫ¬ß¿Ãö:ÅOôY;a¾Ð{ÖHtäò)ÿ£S´%cÌîĵñ¸ºOÿޏ]3'Ž2íüM}‡È$zÞ’TÇþÂ_)ÛÅ¢êE'míûQN&:îÉè2Z+ÂþB”p9ÚÏNê0à£Åk½ßºË™üôQO¤ª4–µ(½¹byÿ x[mÍnéû½w3mȃa³&›KqA/7Öãªø¡ÑjÕZÚjë^e9×T„ÔßJxðš¼$-ú`ŽýÇéÑÖë›\÷…ú¶Ý½Û†åö¯ä×¢z2ô•5Tá}%[_¿­cñ-î[ù;#»¦ºöåõ÷£4õÖ´ósc¥aicáQ‚K€‚T£ÜžþÚ¡:¢¢¥«º ¾óòüײþP=;ÑÚ+£2èšÇi*j[d^×ößu­¹»_$¹âé½TÍÖß{vÈ¢¸ÜÆ,Ï ‹ÑV—œ*Pm*—IßòÔ=6Ò±UÖŒ0æµ¯Íøû—ŸþMªÑZ}=\Î'TìÀÏ“àn.Ý»ÙA{ °wÇU{ˆÝ­·6ü›–¬T‘%Ô‚ ÓN9¾÷ÝH'’xœF5ÀÑZ³I®˜rmäýQñú¿uöΓô£DtrÏ´ÌÍðmæoÈ{¿Ë›54n éÓ‡…Ucàë>WTDÓÂJh°Ö?U­™æBd,…©*ã”üÇê†Î¾…£ô= â€u²q2ê·ì·ãŸx¯É=0òÏÒ‰Óè¶ó*GË'ôÆÛ¶‰º·äÏâJªõWÕ.ëuáv³#|owê´¨.%Øv”z”’4œ¨U•¬u×|ËXeˆ¹¿àß^+å4”qA³[Ÿ~9¿Óv$Ô¶FÛíª¡Åq¤CgÉTXÑØ@J” €÷ÖÆ7R Ö½¡Óè{+e¿,XoJ£Çqå¶Þ]Zü”“óþ¾Ú±¸vUC"rÚP—ˆY/M7e¹Hµn´¦Ì Ù3Úºi¨ËÈ- î”séªu5'áŒÉ»šÿjšLH¦ òUxºž¿7ΗÎèsoYa…&Cf†Í2g0puµ•RqS- oŠ9$'ìua©©q i ©ºWÎÞÔ¡U­ë²¹O«¾ÀÿœÛm—Ü\ÎHǺµ·žNa©™E È!šÜë:þ{biÛa~Wæ^0cOU]R4‰ï²¼õŒ%m‚“Û—ïj]V0—W}qP×ÑŒû1ì¿ÁJ}Ð`xïD‹‘dѬš ¶Ã3kÕ:KE2å¥ðŸ.:VOò+ QÏnDë½M¤5äû/—¹r§ 8A±;[³z•7·Åÿ¦>‘·¢>ÔSé×ÅK¶E.£pRdSèp)R‰ |¶¢y”3œ‘Q– Ü„.Ü‚š8ÍÄs·%X‹+äâÏpl)CéGçCN8j,îܸø)Ž˜d-`³æ¢ ÷އM9PcÚ{oDÝíêI j=Z¦¤ÀŠú‰IqÂòûvÊ=5Î9ÊBrŽ6kóWãˆØDH÷&¥—úF¶ŒZáýdéž-„ïÉ"Uµ^nMI)'¹(!!c¾NHÔ:ê®@쬞²Kk&7å}Íà¯&Ö_6/T[8»çd®˜E¾…!R£Ç)LŠrÏrÔÖTC¬¬g÷“…z¤‘­À‚{ˆdmÃë{v©b¬(ͼó6çù&í¹P¥9Ôå`R`˜¯Ê§bh[©i.JV2P•FI:ªåúÇW;/[PÕÑxŠ ±Ï`74ᵦG•v.#SàÉ©RT’ü&ŸZÜþáx•~JÖG,7Íqë˜ÂÎ 7Õɸ­—çðMÊ$32ö¶äÕµ+— ðã©ÄóZä =3l6c!.+»Xxà¢Òǯгo;qSH[1A]^uªÝÊÛ·u)b¦ËO8~´%9QRûçàêz`‚3!%ÆéeUav¥š2g>]žõntäîwE7¿=eÖŶÌÈ]ü´®CÁÇ›Çb´ŸC‚5˜¹HC›3¿Š«]SÕ4r]…»÷&U×t¿kt=Ô5vºh³7*اGÚú¥z”£Â¯LN;yˆærƹzPÚšŠ¦@Ü!a~;nÍoì<™Ñþ’é~†Ñ¥Ôyžgo÷qÈMïveÑuì»u/¾Œö’Hõ·Ó¾Ø+q&Ee %ª›Î©ù$œ!¢ûmc÷‚»c ÔšŒiôM,›äÚväýlün¸]<Ó§¦:Q¤ô¶æyM™÷ìÈÿÒÌËgöj˜i;cFBÿ¾v2y^ëZ’ ‰üI:ê\—•éÖÈÑ¢#DFˆ"4DhˆÑ¢#DFˆ"¡~#×/ê¦ÓõCU)+û3jjÎqúÿ¢ŽÚÖ qS˜·½®mÞ³¿¢N™j;ÙàwÓ…­OªBFvUo Ž¢òJOˆÓ‹Ql÷×û@ì2EÒ-ç¥MµhâDýÜ>?ŠöÞMú_KѪêÍ!T.RH"ޱɆÞ9¨Wª^ ê_®ÁGš(1’š5·}¼ÖÏÌ¥§µóí)¤ ¾ªJòã·býЉFz=…“)IÞY»·ƒ(žëº*¶V×;gFŸ"+·uA©5X͸C@9ž(#?{(ü5Ë–GŽ5bÞ÷.õît}>‘ÒÁ¥&œ g`[-ï:¾UóÓGG;-LÚ†«/N´æ©ªŠiÑ—!r™RJ’BAR›*½ê5ôž‘CWO£écÑÂ÷•ÝüY·²ü»äìôI:¦fécŽ®f´xß 3¶ü.ù3³'ïXè¢ß}]õ{ÚŽÍ¿tØõX´™.U]Må6´’K‡”ƒØ/WzNA&„’J¦`wf³r’Û™xï'>y¢ü¡SQè9µÎÒzÆn¼,÷¹7²Ùµß’¬;­âßX¬ôïG´¬:›´ÒX„¨uŠŒ#g· {`•6§÷”IWÌ@õ寤:oUQF4ƒE•Ç+ö z¾õúD': éDúk¤…_–(£,Éßý«¾N"ûšÌÜ]¸&—NÝÓ'lëû¹¾WRö{bb/™™7)¬ÝËõ(ŒÚR¹÷„«?º•wP×Dt\\<ëIÜGÕëŸäßMfÚ]Ï)^Zét¿¢´ Em­þÊí¶÷ofìüÝ·;g¨Ï Þåí‚6÷chïlVÁ%Ñŧ«Ëº.æpBž”øÉir%@)J#Ô{‚8ÚfÛp6ï¬øßzü¡¤*«4•iiM-3ÏR[ܺ£žàÌÍÁ­àΠ+²Á Ùö»bF‡DK<Û¾bå¤ú©Å¨òZ³ï©=\"¡”®x‹üÓxò‘—~N©å£Ësؤ“s Î䟗U{néÎ2‰Ê[óÂÐT ‚õöž¤Ã}¡P‰íb/rÔ™*Ý?ØR”$¿NJ>RN1„ãùjl7T‹mQæ`ÜC¢£È›W¤¤R-•ü’B‰ôW¦uÏšaÖêþ*ì Z¬J¾Ñ鬸óÒXT”†ü?,8¤’¥pläjœÂOèäÍø:˜pâw$©[¸\­ Œæ ´Ô”Ä -—Rœ œ`öÖ¦xÏ‚ÛÃÅ!Rç Úú}h€ŠkJ£±îÇHOd¢9\ç—`¢Ö+ïà×2f×tŸÖ&ãC†˜£Z¯Ä§OIýœ™LGZV[ú) O·¡NºT@lE¹þ*ž’!rkh 9[³œnB׿ÕÊÞ“ ’¥¸¥,žGê{ fÉÅjoOž?›u°}í½™TÛûŸs÷jÔ€í>lI«n--¥cáî¥9„`ž(Æsß[„ò‹aÝÅDtØŸi#Ù¿¤>Ýïs²Æöì6ÝUl¥JCÈzÛŒ¦§ÐT \%){“ÛúksV,l}›– –'ÙÜ´s{.HÝQѨ¯D³¯=¯Ý˜ J®:ZÓL\fP}° –êI)ã÷’SÜc²ÕÌ##K{>çáÙüv D$6šªÝYxˆí—„E)µÛKV=ÿ¼ñTÖEm¢õ2ÏóЉ¬€d<¤¸声 ”O9âÙÊûßòZÃDZ–MõÔ-ùÖæI¼wVî¬ß—ä”»=ÒcBI9òØ`†Û°Hù}5I„Eÿx~êhwâ>Tdü ~~ÒØWêT¦Ô8•%I> ú=0¦#ô‘ÖåÐüÓ7>ÑäN€èEj™’˜w(£¾ØùU‘žøì~oQ­&н‰·?×k}RÜëx7ú£nnÂQ·ëkj0)T+¦”™²ä8ØYåKÌ’pP¦T žDzgR“•Dèò6ßø®žª§ [Fék•=îÙä%ø³ªÝ;¨]¸¥ÇjT椉8}·©Í˘òÏ®y2Ò»çêuÇ ¢qÅuô9:C££\bîÍ•™¶}ΦÎo}½ß 6³/n®Ê­zä¤>˜•ºýB޶Q¨äDeÌ)D„œ¸ œqíë®…ÐH/w»qü—•¬écQÚ.Ïo·É8ª/=—U¥¹Z¸ƒ^!¹­°SÉ`ŒV Pú«a¡>–Ô¡0Æ áºûVðÜ‘w¦ôÞ]+ÍÛËcâ¡?FcíM9™GÚ/2J„u¼pPIåëkKFZžð\]#_=eQWT=Í÷ÿ’k\·ª]ƒénlÒM‘¾7ÜUÔéR"d¢47Á‘çUpB¨Î¼ÿIŒßF…ƒg”غí¹}/Èáù¶œÒZx³ó:Wq¾A—âÉÝÐ…>¡Ô÷Tno<弉uëí­¢·xãËbN€¿,ýJ¢ïg^ªfýlÇ„BÍø?æ¾5Hd4@$ÙÈîN÷æîÿ ºØ ¦<PÅ @ ¶†«««ßDFˆ"4DhˆÑ¢#DFˆ"4DhˆÑ ëïm*;õNÞÍ·£Ì‰M©îE°«i©²B–Ü4Èe¬¥?2±ô•£Ö3G{fÈæ!´YYgU7Õ¥^žìþŽ6Šãr¯D´˜\›ÒêK©*“%g“Ì´RHJ $)9íÙ>ÄëÄô¯¤q×Kæoè…™‰ý«vüø_. 
ô‘&ÄÁüõÓ½™ÿWˆ›ßvþÏ‹ñeWmû’˜ˆSè´/ôJ45¸œÉÁYüßQôõ×›†póeöjÚj©§j\ÞIæùäÜ»uP\‹‘R¥Êt‰³C¤÷Bÿtä{‘Û\W"~·ï(" QaŒrf·æ¯®Èø¿ÚöÞÕQaß­â»Î@\Š+ü5u Jœ* ¡G𓯢èþž„TáD.F-fv{_¿ø/Êý.þMºJ»KÍY ê£ji˱³âøá¶öä«·Výj]][Ö¢?p´ÕÖ¤¼MØ‚K¥åvæâŽ ¯€I˜þKMiÚ­)3ÿð‹noÍûÉ}»É§’ÍÐØÌ[S#Xç-öä>Èÿž|$úÖmç†æÑRw³©Šy®]õvüí¼Úd‘ñUöòäÍlŒ¡°¬1õ V=އèóPᨩ)ß1Ú]­ð½»ã¾T¼³KPrhŠIal¦¨mîüB7þópê»6ÓÔ>ªº¨Ü.»71›ówª¬Ô*0Ï—Aµb~Ήl1û©m p·õQ$“êN=ÑHEy{þ õïuùî––(cÃæïÚÿ]ÖITÖÕ&‚ëþfB¹)Ç~èúcðMTæv_j¨<õ—ðŽÈŽ)ëQ(y`©Ô+à“ŽÀ7-ý¥ñO©=!¨m)Äù”ÄòHB1Ø“œ÷õ#[_Ák<«UiɲÊ"€ñ”Á)m.`#¸—ã©7Ø…Býn²ÙN‡:9ŸzôµBÜ;T—h¥õÇ[ßµmjW.ŽJ¸ú+ `ýuaª)ð±]ÕsŽ\\dßY’§'ªMÄ£´¦eš=Äêf=Ä»ÍCm‚Pà8Zp×Ï^dQî]8ßV;Y:Œè¬ÇûBm\­Ö™¥4êÒ{:On'êu“ÈZ<;Ö»ö’ zðMQ•©Î Gi9Z–°„U{}L ;K/êñS¯‡_†–åø‹×¢U©J“dm ‹“îùÍù šÒ;¸ˆaX$cï\]-ôÍF¦ÜñgBv•^»¾TÚr]y1Jér¥»÷B”By`ãc¨)6aÈ>·~j´p‘²Më-©VÚ©ð#ÄHO͆ò¿”œz¨Ç:Ð…Z½×k0Õð¾QBR““€“ëõ:[‚Ó¸®ªE¦ÃÁrÞuˆ±"6K®Ciî s÷‰á>§[jÖ1ÙlÇ@»›±}Öî¥ùMS¯inÑh.•©ôVñÊoå%àGlz7œzêÃRŒ~¶oõïUä› ʹxýô|Ô7·¾–5«pÝvíóDf-}Š<Éz"*ÓkR ø­´rÇǹµ±RœÌ%nÜ8ø-¡œGd²Y·*…Q£EÏ¡Üt¸ÅÏ(HKv;ÿ„­@uDᔀíà®4€û"ìëêDrÜj=.)—U®Ô¥SØÈHvKÊàÚI=€ä{h·ý¥tºðÞ®‘úc¹w.â¼ö®°»3+­Ûty+zlV“’âÃ…´‚¤r3ûº¶ô`âød»Úû²î¿ðUF©Ü°àTŠ+̎ÉIòd¡.¿^*àÿ-RûÊÂØOфܧ÷3¦Ý×ÙêÏ ´›r«ñ|sLxÓ[[*HϨ+_,}u5!áðñÍS¬m‘/®j׬×6¾þ»mr£šv\i)-²_`{uçkaÁPcâ½R^&!~ èø8ÕL}ÒŒ|Ÿ…y š3ÙÇ”tàžúŸE ƒíeR¼ïogVâêª39.qm¸WœÈïßúk´y*0æXRE&ò7s7…“Sj}R­÷«1ÜJ¤¡¤ ùíµžKíŒkHHÂ97™Ø˜“;§ÝÁ7ßZ}5\Õ÷šYÛ-›Ÿ~W²Àa-¸ô7”[À!Klú~î¸Uð•F—Ñô2pww¾þ6f_K輟£üšt‡Kqžaˆå»=›ÁI^´µÜ>[7W¨6¶gî}írÞ’’GÌéøéiBàR„¯DòŒ’M0½ñÉÿ5ò©m£àÍðZ–„ñHMD¦_º"4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ³ßÄ—åô•nîæìB¥Å®I±â±-º|‰ ŽÜž, ”ó QIïü:ÚI^0÷²ÓV2¬·YSúíÓßRCmjîìôº ý”7«¬µK©ª# $§¼âV¼¨•ö(O]së4V‹jè¨f¦g›Û&úð_KÐ=9éàhZ!G¤¿U¥&iAˆ¯Á‡&Ë..ºat»³tX¯YŒí¾øû ¥R„ô·yÖœj3‹ S…aJPãÛåÕÉt} ÄqŒ“·¹yŸüAéd•QW~’6¼‘±30 Yɲµf5%_••©N4Ð `믇ƒl¯èi°áæ,þõÜôÄÓá¸ó®)-´2p¼ý?¦%£2aÍÕŸÛ†­ ^ŸéýBo=Á¹W 6«oä™Å”‚*r›ûÉm׈{¿*ùiò¢u’ŸDú?-¢ª&n/þ¬?73ÞŽ_›yõ+ºÕ­×ܺ̋–ø­¼\u÷ìa·“ÁˆÈ=›i´öJR;‰ô%aÙlÝ÷ºø%4!OGMËëé×T $¤!]ÇÎ9ýu­™ng~IÁBSAòR2Z|dxœ`(vÖ}nÕ9>ÒW‡Lª& ‡#™)*%%5’rÎOôÓejå–Êü¤Áj-¶ô€¨î8Û¼Iù^ 'HÖÏÕZ,<Óž‡9Ûn“,|O•N\u,6q÷HÉWËRm:q- éf«ƒÑ¸‘ÁSß |Œp!ÒÀ*YOªséZXãûI‹Ãó~}–[àÙç¿~2Û?g&^{wZ›ºµk }F% ïÂÅsÓ >”îT„(é¨ãš÷Ûw‚ɵþÓ+¬«ñ;ñ̬x—ô¯jãíÛÚkuáYŸ9ê»s•1(BRÛA)ŽÙB‚$äç¶}5[_3ݤÝÞ®0 ±ä¡ ý¡Nõø™ìÅ!ŨŅT5™(#ÈROõX»°­å ëcü[.–‡WR5PhÔÛ—ó„€OžêÀN}{ó×FÄ"ýËš9È+ùþ‹ ¶hð¬'ŒÙíÿrƒÿsªºö&%~ÿFfûM³âAwÐV·ç·Q#Ê sÉZ1‘õ ^F” J½Iz"J^ vÊ­ßM؆%6GÅ3%”reµé…“ªšM¿Xïe~Ó·b™<>÷âÝÛÛv\d[NË«¹1ŒÆÉD—ÙI쀀Nqžý³¬è€'2-Ù]C¤1vøw©b½ÔEWp7"»g°Ý»žÍÈ@ÈqN2\ Éyƒ–8ýuÑšD2_7uZžaÖêË¿ÅWMú¨Ý7ÍDZ[ݶοAÝ* ÍÒåÅZj©O¸ÓÁy?>”à›\ø€d¥(dÞ/quج!‡säýêÂõs| "öñ½a¥1…—³Ôë)…¶€„Fv{QÚà‘è?hÔr󤌳x¢}˜—®¯‡É6‹¥ÝçU2M߆G‘²¼ÛVæßtÏÓ]Ëà‹ck!K’œcÌB'‰S„êí~ªÅÍÝýîëç‡öJòjEº4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆe/éT…7¡>£ŸV8¯á¢øÉTvÿ{ZV=¢Úd…Å¥Úä ½‡³QVÝ»T’‡(»g‹yA9\§R’1ÄêÍC ô‚¢£„LÂËÐ5QAЧ½dºÛýÖR•·Kûkt Çu´.SôiQ’î;5¥¡@öÏ,zja ‘1vø/'W& rw³ûžë$D†®V˜ÊPªÉFT{ %C9þšøƒ‚ãÉ×ôÒ)1RÁ'8Çä¬G[GgY[wYêSz¶ösn^ãH¥¬®õ¬ ù1’r¤਀FƒØ/^Ó£0þ‘ªk³=€}§çÜß>Æ_òÕå(ô?è íçÓŽo¿Uñï~ÿeSÞ º„¼:äߪ¾ïî#„Ök«-S`§>E TK1Iû¡#¾}O"}I׺"{»›ÜŸzüCHÑ 1çn<]ø»öÿ–ædæR߯1±Ü§î`~Z‚=êñ‘d»ép\©6†Ùe ZÈCiQÂŽ=1ùêlî¡,,8’ízR)PcÃLW"M[ K.8p1ØwÏç­]ì«¶iB¥ǤÓëM©.EyÕÆ[¼ðV¤d+?CÛR>k-‹ãø1+ni5UDuR–ƒ!|T]NpH8üµ¸¶ÊÑÝ}[mŠ|Ç|–þÐ[Œ¨äd¤$ú‚0pG×XÂX{VUĦ̨ÃðµS4÷èí3"w•þ“oßï$”,qí“éë­§bx:ËÒPÚÍú½ºPÞ‹Srm¨¯U¦ÚïdÓ$«äªÀR²ìE,|Á*~ÇïjXFÇè÷¨e †ÖçW·Äã¢ûWÅצ wW»>*ú‰(¹©=]ù\eÖÆBg7ìÂПâ㎷ô€Ã¸ÛêËž¼O„·}f²^}"M¹PLZ*¿A˜¤•¦-^ž¸N¬{à(`ÈçðÔ ‡Ú·z·¬gê“:I¯Ìsà'ÈJ”“2žlé c#ù꺑™m‡ÚÛ= xXØûÛ²{2Îæu}Fi¯Š©HKó—)á…©·"Äk)F¾À¸>ö¯ÆÒêpÁf¿Ö’çÊXå}cäÊN›³ößI0éýIõ¹¸´ýÃÝÑÆÚ¦Jl9B¶$¬ù†-žsñ2RµaRݤÝIäqhéö±;Ÿ>>›ë,ÖÊOGL©§Z;Ÿ}ø‹ÛsjWíçÂȨ8d[ve-õ1M€ £å½(¤ò’þ0O<%'Ñ:X±IšÜ6:¹:®ûÓï·ñîÐ":š{l(¾Yc -®$“ØŽÞäk#²²s_¬«mzžÜ:Ô¶ØPq”8®'9ížýý;j„»ÕàÅ…_Ñ·Ûß¶:ÖÜ{Ñö”¨ö-­*Ÿœ¢KÎ5ÅYÇe¶æ¥¦µaì+7ãíºÎ퇅ìjD¦ê]-ÒÐ…(²ÃHykÝ%_.~ªÕ™?j­â•»c|žMùMŒ²Ómÿ„§ÿõ×=ú«¢­×èýÔÅòÔi”(üm¾¶á•´¬ŸüºØ··zŠ\ÁÔ›â¹3ìŸMÌn•1PeKŽÓÒ ¬y 
‚“Ÿá?Cªºc½±n²³£ßõ|;ÞéÑ᫽èþڦ̻ÍK¶©¸²ì5¼íEJ[IJÛRHPåèG~_†ªÑ‘½@»¾ÿ§RÕ°4]ÊÞÛ÷õ‘X¾‘gÑm–#ËzG–&dž€PãÍyÜÊYR²UƒÛ±×z]kŽ"ÝôËšƒË(¾]—ÔÇF3›]e™y;5J’äª[Ñ”‚È¥1׿~ÐqR½1®IÓcŠXI÷¯KA¥ÊаfÀÒFíbãŸ$_)­ï ®³+5F[n»»[ŸC¶¢4Ð%SŒ¬º”²1•å-,€2}u[DÆO¤´…ApÂÌüí—ÉÝzn–ÔÀèî‹§|£„‹·Ò;>}Öe³==ÙFÀ¼aÓ&4o³mjm>=<œÈKL2–Ê”Â@W` Ïá®°6xãäËæ ÷7"Sf°¬#DFˆ"4DhˆÑ¢#DFˆ"4Dhˆ>‡DXçúK«ÿ‡¦ö˜ë( ]”Á!¼à8Ùb ?‚¿®´ªêÅûKXžÆ]É3féð¸7]ÊKK‹G—WŽ`Ç ¤!]=ζ¥ VVâÞæ½'I*uÐ1¬gß’~íûa=A[|‡ÊéJ0GÕ~ºèÓê˜qoÉx}%‹ô\øy?ÉfŸNÝ$Tú§êâæ²”š%»FªN«ÝÕ·ˆC4jS/r}ŨŒ2~÷á|wEh¿=¯8Éí»¹?`¿ãÃßÁ@4÷Nèº9ÐÚm;3³“Æ#ûR8løsìí²ŠüF:Ń×~÷ÓmÛ*2é;³š•J@Ã2Ü%É®'°ZÜÁW#ßÓ¿ukéÀâÂÄ#ff°·&úü8¯ÃrÔUUÖI¤´‘㨙îOýÖìnä¢z‡–ÊPÒTHl ö#×¶¢¼¦^1ÛOÅ-7æ¥M« ÉÈüOü‘áŲ¶‘ÉúÌ»­¦Cujt¥?ÅQTÓÃÓÏÊF{zjg-­Êªr[´x·ÕÙÍÉ\§ÎuçßYÉG0”“ßß²µ"÷%iÖú¢Ðß§>×&ç¹æç ÇPûø÷>úÝñ2Àýåûo˨E³\C2±KbJ[Te#™É#ô'Q—Ub×,InÓ“-ë‰èpÒÕ=Rin#æ$H ÉíœjK‚ÑÄ}¬Õưwb‘bøyÕi¦IªÆ‘&!>qS©ZŠHû§—¨:ÌÕ8"qâƒN'†Kå}ܳYáPÛúu:QE24FZy)¦$¶¬’¬’Ë\ö©'ÊÊãÂ.[N¼Ui«mIKŽ)( C§º‰Àu«OŒ¶VÏÈö%×6íëb´¸³fÌL….qëßVð–+2®ùìâN~–úÝÝ ½È¨Ü›kPm*ï¶+ÖýI¯:—YeJÂÞ-äpx'þÑ?å«‚xö‡'TÞ1ê“]]ÒM’7“ Nœ÷ni‘uüIãæÇ†ø>Cd€ †ß©ÕÉbU©ºäŽ:;‡¶±öæÑ½µåIz׸j³kìä=‚[y„`<”¨T±Î¤ku]¬ªJïÚê­õÔÿÔþñ®þÜûªmãv¼Tˆîº„¢ 5’Iò#0>VÐ3ŽÃ?Ͼ±»µÔÀÄw%½‹Þi– [ïºì¯’ž^R‰îF}·ZHÖZçá·¼OP±j ¸$üt qÕDªÃ…SU—¤sW–ÊîsÛR ž/G”b#‡k.õ‰û˵¯m?P½žâ˜?dÖ¤&0iÐân3…ûµ'?CÛU¦!Æøw:¹TV­~ކÐÔhnâÝ2 š®ç^eê)®–¨¢y*SâwPOHÝ$]»}lTЫÓ{®N½g2Ž.³©Å˜|»”¥\òSï®4qQ7™ŽnEˆŸ·’öú¤•x(µŒáOF@y½™±wå½g•&,Jt4B„Ü£â1ÀAœœêÙ•×z˵å¥ÀÑÊNÈàœþŽ´Y¾Ïâ¦ÔŒ(èߎµaÚÚ[>,8WÄW¼Çç¹;{ã êgiWÞŸ;vòi5ê,¤Ejw‘+!à<¶V“Œ¤÷ïéë©eFy ~oE.5Z´õR‚´-©M)r¢¡yLb’þ¹ÓzÕšÄ8’ޤݗ jN4öÛKªRÜÃ|’O§ÔK%™œÏPdÓ둜Žã%¸ÒR· 1ÅÐOÝ$w:ß®¢ÇuËvSP«òëØ¯Xi§#@t¨$ÉhVpïøê­NÍ‹ü•ˆ0í lÔ¶õ›vUVË R’ÈCG“„÷•ß~:®nX‡ñSoÙ.hVû*K/- LŒÂ‹©Jò©I*Y#ÑCÓ[™XvZ쵿YÓžÕ£ÒjM©Š²d°Ñ%lOmÏ1æ=‚‚RsôÔ1ÔcĶp·UtÊÚ¸×$W¡Jàüi-ã(%'ßÈšž)H2µÅ\.“v^W‰O‚¾çôäÍEŠ®ãíåUÊ…½[‰ÆB\(È$‚”- N ì»ë±Û8–K›'£•‹ ÇÖé3è×3–Ýr;Ô‹ž”ùƒR§NG‘&;Íž*IJ±ôöÕrŒðÉ“«Ìâí¬)\“¡Y6ìH+~Zœ%ô€õ$çé©$dUX€h’ßZµ½þ½Ån­‹žþ¬­A(‡B€¹)o>žc˜ BOñ3”½ݽ̤Õ¤+-ésôu®„Xþ¤îiÑâð±-‡RõNHìB&LmŸR~i:ŸÍ£ £|EÉ·{ÕWª¿Ø·‹­²é6gH½<ß¶îÏ¡ØÕ¿kÉ“äÄ`¼ù%A°ã¯9•8¬¬däjÀ™b¬«\p–w_ÎèSWf)3XRêu)Ò”ñY.OvC®8•zç° »®Tµ‰uáêaÃk/èK¥­¥þ6—mä4ˆ•«ZÝaU„4¯¹QY.8¡ø‚F¯XIs‰înB³¯ô6«¿Ûq½pÙ1cn•1êÄ EF!òÚZ”=x6Q>Ã:Ò©±BÒ?«“÷p÷)© ãõÓo-§|õÌŒßë–<Àµ,ŽØH$çTn<î®Û±lŸ~ÊÎèc¢ËêGs£.‡V½©pX˜ÐfStöÎ?cŒy®„ŒdŒ[Š;;É"§1c´1æ© §V] Õëôä¶›²¾õÃ5ÞñçCK«+ÎÐÝ$c\YÌŠòguÙ™°GÉ]~Žöî×ÜKFeNÓ­5S‰G­=[K CÌ€ pö8Ϩ±£æ#öÛ±W®ˆãŒ²gV[kEÅLç2 TÁz+T²OÇòmHÃek9!|pB@Ï-\Ça«86$Ö²ëSjT=Ñz׎n+š›Zr+ÊlÄG”™A%-¸±Å€ R•v<µV<å!/Џe²Ê*ØxöŽñì éT»vÝÖ7 λi³jé­¡LËDöBü©a°pÄ“’]ScƒŠî1­¦9c"-»¿/ÆÜÔÔ2F/fv~ë¶å¬Ý,ÝR.]Ù5ùºº*;Ê«š’¢¢2{ûêr+ƒ.T,X¶•ÔjÊ4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆd‚”燧TŠ( Õi€“èAm±Ìã¶´¬û ý¦Hs<=Ž o™ ¦¿i[®µ6âÂSëûGSÝDŽÚœ?õŠ‘ñ] üÍ£›ƒJAîSÍÙR]¶ãuÒd9Mçy*8K§¿cÛRLx d ƒ ‰Ç}î©OY[GP½º,½÷&æqçkµ ÑPÂÃc°YÓ°×;S€5…›»®„ÓZž<™–sY­yp^qNpKàpeYíëßþ•›±`ˆ’òd¶Ò”„Ü~#Cê¬}åÐå=ÉŒ¶[â’ÙA=œÇÔŒã몼TŽÛ+†T…ðx%?ÚS¤/ öïØ«Ï‹¬ª‡Ýâ¥~ J]Eeõ¢¢ÓŽ†Ý”ÑRZiKJÈù†u€Éj]eÁ:àErS’†–BŽ;pq­OËnJûq:KÔSb)‰Züç2°×—ßîgÔg[ã;N£&Ú~ä÷ªEˆË’#RÐóJe.žE<#(z¥'ß[]ËLKÊàÚ¥÷g¿PnK1&q#Ì)Î’‚N2T5»D2E«%£žÄ)…zPêŽZãí¯EFwàk,K‘@PQþ$”¨ŽÚåù¹F~“‚¾3c•àÍ=Ë¡öqm¼€  8¡û¤’1ýul2*:Ø…8mS/íb¿M˜Ôö~óA¾YOñ$ƒƒ¨JňVÞqqÂB¤ŠM6³’Ü„Âq¨íŽjr@E#ð':#/È8»SzÇŸxØûÍq¶†«p[÷û&9Ÿ\ ¤çÉ–‚BVÖ ù'ÛR>Ï5­±Üpä­ìŽº¶7«ÒºÏUý9Û5¾ÚÆM×m6©Ñå¨v9~;¬,Œ„/ÍÇñjôU½¬Ù»þy·ƒªç³áítÊÕ7†5¸ª„-‰­N©4|ƾ2Ü©OKʲR…ÔƒŸõ†5’¯‘‹d-àߊþý—N;Ç¢¯D ›k§}‰¦íý6S‰GÄ×b„‚)ð‡]ùOý¤µ~ ê¬2ë|sü™–ãGmÿ\Ó÷Ãs{·½ÒžúuA½+תé”×çÛô²‘”¤”7°ž©Ä ©J$62s’d¤+¶-ê|#’®>>4†ôn}Kjz‡¬Ò«.÷F‘D¥\NDnÖl§ŒfVJRäU:GÍó ®xk@ã+y©†Ø£Þ¾¼>¼7ÜêtWºƒ³'R-]œ¨I~GÂúí'™,yA*5j}^Õb4¦a )% ©‡ÙQ O|,~Ö³M«vÊ÷íþ H¡Æ;+³hú¥é›ÅnÊ©íÕ*¥IÜV–çÚ/Øw9êUN¡8/Ã-½…¸”ö>Sü‡ðãHjÚö,ø>lÿ‚Öh 6‡à˜·¶Õt]á¥x@«ÈéÊ\‹ÂCDm˜2ë‘_q$á-½6cŒ´âJ{•0²žÇRÏ9bÕ·ƒ2Þ8ä—g«½txˆ_ÞPâVá1jíý!^t+f³)RÔ=˜ø (~ê„%?Mpæ¨9KK« 0@=¼ùw(pW¢6 ©üæ>g[ K…= 8Â~½µ µ»ÔÌDûB§O޼¦Q.·¶¶uÊú¯º›È É¥­¨qiRž9jAô'Õ8k|ÞR),àNÌü-ù«õš:IbÅñkÛšµÊéÞàg‘S¨¼åMŠrã2ûïÍ-©™à¦Rœ‚‚P~nÚôÆ`Á«ãõeç@nC'bSÙ=º§Yµ 
Ñu©«q7eáöÜ•–¸|"Ë,¶#¹îÖyzüÚ¤gŒŸ±•æÂ)©¸;™]»ïýÑûsn¯K:±nXkûB­T-»I¹̸Ï4üY-çÓn`¨e!Dhml³³·vîÅ-Z_F´ÃÖé]ÕsXUN* \Ö*ÊÁ^c)AÿÈN‘âeNÛJçkeº4DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆƒètEþ=«KþW…÷½EôÆøjǰ{|Öðuýÿ%ô+Ijr×u¥ù›´´h-,Œ+ˆGbsžú¾!m#4…½Å™Õšº‚~SSð×Hêv¸!‡¥Gl­.D (# þ'ZÍ™a\¸x¨ÄiÄÅð÷Ý +WÙ‘Ðà îJ”Hì==µÙ‚˜zâ±á¹lÓipbEu.KÊ\'ËÊ»€Jsœüµ6Ó«$ãê®jƒRæš0ã­‚TVOÔŒkRk§­´–íúÂêÉy¥¶;ŽPÞUœ9¿ jR°ìâúð^銺oìØz8/|ªS‰ J ÷`ãV‰Çªªž{IÑE¨?oê”öxT£Åý³³[ mC¸ ¤’¯®´?º°ÃŒ˜—ÒK”–Xr/—.:°¹ #“©?Ä>ºÉ>ÎÒØ›h°ŠX´éÎT)/¬!q¡$>RZ*8ÇþšÛx쨟­É;iTÖ™˜ÚâFž¦kÍeg (v*$CøHàN>ŠÕßÚÞÉýꃶ4Xsۨ˨U&þÑú˜¡Å€œ$qëßTä¬ÀZ˜lîÛՈ鱎²k³$Ëëxu\Ë«O™Ë€˜´—BãYQ@‘’3Œ‘W–¢¢BàÖäÊŤ> O¸Æ3®®\W4}$ø–=ÝNE¡'áÔ†žŒ–Þm@à‚Ê’à™á몶gêÞ-®Åý+ìcµ=õÛͪŸ=JLë¶Ø¦=-ÆJÏ–heKY q Ær¬jäm蘻:Ö2ïX‹âeÔµ­¼ž*[ÃpÖ©R«ºlØ´(b4”6˜í-²¼§ïaC°QTÉiz·²µeª. j *ìÙ·k[}"Mf·iÎØÍÅ{È›¥[#DFˆ"4DhˆÑ¢#DFˆ"4Dhˆ>‡DYãŠÈ•áµÕÁ#ûbšGàRæsþ¯_íÍoLþ—ÁþJðßT§¡Ò[ Õj(EO/N¨¥YQÇlêøôH—ewHþZ¡.rȬej*›\e䤮8I$÷ì};c[̸±8ç…AtSÕ+ ýÝc=¾ÎeБƒß’òO¯ÓUåê)›¬+íª¬&´%N '—01Üia~²˜—µr¨¯2C ­!¨ÇçåŽN~ ý5ŽÊqx$ÖyÇå5žM=ËŠ0@(öMBë}–$¬«‰~XûIöâÕ„ùHn¤wÆwóա것qaæ¤m¡«;^³î:l9´ª; †©M‡V´»Û·²JT;ŒkRõH¸-]…"L¢×$L1ȑ祑É÷acc ;ébÿ5»2ü ß_dÓ\iǤÆóx¥^JÉpSŽÚËb¾Ê]e*í{)“QZU%ß%–Aæë½²¯@Çàu_IT„09Nù%%$ì<5õp\ ~°¦›ÃSŽ8?ÎH‡N ÎBG–ïlzòÕXà°ñVd˜¤"Ź'ÿi ŠâŸ‰jY,•&B5%?ñʱŸq«.ëø-DÇ_gw.©y-U‘Fdeà)#ÓÒ€O±i #Fˆ}f¿zŒ¥'ꤚÔÉ#áUIu*ëÉ?(—%o’O~ÜÐê@+ËYDïí>iZ›`È™=ˆ)LXÊ8©²@c?ËAÄe…/€UõðÉèNŸ^ÝØ“®ˆ(v=–f°Ä”sijZ’ÜIõ+æþu ŽÄ¨M'²I;q¿H‚Îo¯Ôæ,™)Úêgì:Öà̘ø¬@”•)¤Ke Pa˜­¸œ”Qßì«ÌL]Ÿ\V<ßc~isÅë E\5I»õ`¢4§Í=©w´(`!šÄ.± çá~¥EcRÍÉ´;þkMaՖﬕ&ÚΛïn­)ñ³+â¹iK}·]¸%5öu";I!EEç.‘÷BF«4ýŠsC¬íÝÅXÒ–Ü tØ{·À¾Ý=Qد>‚‚„Ét2–J€=Êr‚#SO”l*LÌ–SÔÛhòÙY!¦ÉO¨‘Ÿ|j£ººÙ)ët|RºŽÞ]¥·vþ©º•ZkÓcÒ`Ò­¦ÓI°ÃiCaçY qÃÅ8<”AÖ3Æê=H1bâ &éMEdyIRµ+ç%kQ9*Q'*'×'[aRb] !4¨•U!ç˜R©RÐÉ)RÔYX '×8:í&klúŠ­ÒýZ •…MT+:5E`/¹ 4êÈÿG1ìuV™È*„¾³Y×±±bVª—“mATÙ4ê#2¡2Ãá²Ó…)’H#%záÒ¹ír]Š‹µ3·2ˆdSèugÞmÆTŸ%";„­Å+Û±É ŸmC;Øq ¹FCÕK[1í»ÙtX°ê%š½ßQŽ˜Ò8®$`’PÛÄz¯$PÚ⯸\¾JÄtg×S½ôA¶çQÓr±*èrykŸ–¶Òµ(8±’”ñ'ŽuŠm 0”‘Æ-Ÿä¡­¡*“.ù2´ž;µÿ(í¹Þû˜3P~V¹Ó 4鮕˜ñHó™ $+Õ8×~†§^e Ÿ.ÕĬ¦Ô‰=ò÷$ýÙŠeQUsogÓk}?®ŸR§ÛEqnȦK ,̆ú–µ+Ëmi^ƒY¨Â×’<¬ù·'çâ®P¹dC'¾+1à'R*éǧÈþož¦)÷:YÏÈ¥V%­ ïôl£†¤±ù³?â¹¥Ö~÷ø=¿©úÝ¢#DFˆ"4DhˆÑ¢#DFˆ"4DC¢,“ñ¢W›á­Õ·q…TYÏæ—HŠ¿ìö‡æ}«÷:ƒü!¡ý¥Óµ¦¤’M‡IŠ¥+¿d…`êôV󉋛2·¤…ÇCÑG÷ä’²×eèñ½Öá-¤`®·•r"-ê3ÞËT]{=}Q”ŽmÊ¡½”`3Êßý­Bcp%’+X…a]·!ª}Ф©·!¥©”6‚FT‚H>½³ëªø¯ÖW‹Õ\jòžp3Äœ!e^Ÿ)þX”0õWìw³K«e`ø BE²¤³.TE7ÁJxüåG¶}ÎIÔà;*"+–õ%Ùi‚å-å·÷›†ï'òRºR{/¿¶°Oì£çÖÜ•+×dÔ&þ¶| !*!A˜¤¢Ùã€2€u bÅ´×îA Î]é*þªëóÍU$ŦJ I’[T4‘ƒ…ó~Ôð±aÚIœA?é·©tùQ༶»6ë¨qÌwæqêN¹Ú_V´xm™bK•ê¥'s6õ‹žD³ªtwQN¨°ËŠã-µ(%.`É#¾È80—©ë7ïMf[BfÅ B›VXòSiýýõ3žÒ†Ã…z9ÍAA< ¨+Ô¨û“í£™2Ú×,)ÍnYîTžŽÌQæ¾é ʉ?A«1DN¡9=U}ºKé~±öL½Á¿Zø¹c—`RT8GÊ\í“ßÛWD&Â9ºªæRmnüTƒà÷_¾·+z7Æå½×OŒ^\f(¡¬ÄÍ% @¬ßRSq"Uêp¶ÈŠÆ=Á~œ7 qaùU>uÏPL–V<ÿÛy_QŸ_]B}egk%}<)¼eì¥Øg¶“¨[ž¥@‰dGqÛFîE=ʯڔòRM1æS’¥Œ#€WÊ8¡îp8nܵ––!_}M~’¥Jt±º}ÛtRa0 º/À‰ Jy|®D¦4¾ÝŠG¾·yïÁjÂÛD³›{7¶üêcq¼7*ó¯ß·T„BªñsÈFI °ØÂZlp”$${ A†Û*Î^«&ÏÁžY?ȬîYÅ´ƒK‰”®Øÿ 0í,_e)E‚Ò¢¹ IàûÇ,­G+éÙX¹bÄ›÷œn•PiHS/¶Ëˆ^@õ àh#bRí•¶4hîDýêsãK™·Ï!Dü‹ vRÂp}²«P¿ ~Yª®6ª9[泿§*;vþçWR‡JéíÓ%‘…sÊp2q“®%'Ú‰v.Õaþ®#Æë’µ!­—µi“1j—lÇ”ŠRk”z$RN]R= ¤’ŸáŠ¥öK멨ÀŒ¶¹{Ô~¥:â¤:© yé+S²ßYäûîU•œëŸ†ø‰t±ruPþÓ¶¶¶m_àÐå2·%ºc²a vú¥d ƒª¤Ûx‡’²$´ÀgÚ!îKisö߬¯Žêãä4ÿÓ®öƒb.öuÀÓc <“oÃØ·é›WD®Ú5™r/K²©Y¥îµ³zFB–K£¨”4ö[XN|ÓŸMZ›™ìí¶}ãu¤R<2Å'¨.Öïv·â®ßƒÝAM·kS[ލè…|Õ)›#º"­•XüV’u$9â"㚆²=]A'Z¯©thˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ~/îè‹ ¼kç˜~]Q¯8øŠ÷“‘ÿÜ(cü5@¶?5µ3m—s¨·Á¾œó=Áï%Ê$pz|¡YN5z¼¤¯i²Ѻ4Þ?ÉX«šGÃÃŽç.>Z±éì{vÔ’î\8³$€[j¥*[Jî&@”Àíê¥%8ÿ-jÞ¶¬™ŠÀݦ®ƒ¼|I z: UŠŒöÆ »èƨ6åÑlN;IºñKRxƒ + öâ}㣶ÊÍöº»—³i5)Âgæsî‘ìªÐ GÖJRrï^¬ÛñæT†™¡LÆYóV÷Ô~³Xq*Òf¤±Ý×åK6®ÌBä p1Ù?S¬½kµ²"¸k¥Uª=J§ÇLf“+cö‰u íÌè¬wÖ›DØIXaµÓ²Ç«1A¢ÌiNJ¨0†‰@}®hlc¸Ïã­Äo³ÅW”¯´¾¡Á‰I¶RïˆKü],Ù¹pud<:êÌ9½Óé Ó­Û¸‡–T J×RR°IÕzV Æ>+£¥žó„žÐ¾ÍŠÕD«’AúêÊæ¯Ý"4DhˆÑ¢#DFˆ"4DhˆÑ¢/ÅýÃùh‹|r'¦…ŸPî,÷—w-~¸–° ¯êýà[A‘?s¦‚뮹áï —Óþ‘ ¶´žØ56Š—»°—¦éÅ8ÅG¡õcf 'ïܬMÝ–èá`$–ÜNB†F25zA,+ĆÝ£Í?¬ñ+Ǧ•é­q,IÞ±K®ËTÙ½q_ttËNÍøô/äÉùŽ}þmRÅÖWaÂâÊ#zaÔ$ 
KŠùIÀ8÷–±}•&éMŽú^u¸¬Ht½Ýç@ù–~€j7Å…Hùï´íå¸ãÊ}ò|‚a¿¼à?ºO¶5b-Êëv)inÆàý±n&˜Ë´Û‘ ið iIÉSÜE!líØZåÕÜ‘áÓâ~°I0ük”v‹š\ZY9 å­ƒ5©¬[Ò­³GªÅ¹¢ÄuσÁ˪.z6}xûƒ¬3ˆ —&[» áæ™›¡\q]SRÕ:Ëa×3™Ã\êq,ù«’Ñv$ûN\–îFa×À†<Õ8Ñ!L~ð>ØÕ†ÍDC²¦>šçCÞÍÄ“YªTeT+TDªkß(oíÕ#!-”`0;öε=–ÂLÙ­vƒ}ëÞ£rJ¬^’ÕV„Ãu¹„· šØòʉÂZl$|Ê$ú]LAÖ÷¨ß>®æVÇjza¯@³eÉ»*tëVtxŸÕS'ŽBŸ<3éÇÔÞv.>‡?’®ð‹"ÉFW/7+/¬üÃå>˜> ûƒ¡¬· êfð«ê:N}pRݬ¬E¤Þ‘þÏŽ¢G–‰àá>bÏÝËyÁ?½© |µÅCSÃgz¼A<·¼õO}Ý[IE¦îu§tÔSLdUY‡W§8³ÉM8ÓªIXÏp¤réïzJr2Ä.Í~«…H°á+ä«EO³ªÛfp3¦Õ.+²~ ¦çù¶ÇóÔ/Ll8²Ëµ”¡8d8³îOÎüz’êtÿˆ³èûYB¢È Uª×•A¶]€²2b$ùÜÉ#£ë­F~µ™jsƒq¿r‡:Òé¯þEÝ\ÝûH«¥›âE›ä"EiˆÛî8Ê\RCY8 *ã÷æu¤À øG⤀õƒ‰6¶&CÑúšÚeEuQ勾–q BŒÖA ûdßQ‘]o‡eǽHÛ µÏÝ*܇\zTµH ùΒ㪠BSŒœ«¶=5ÑÂ>ªå±beM¼p:ܽúé’ηìJåVÈ¿÷šBöÜGK3)ôdä¼ …4âœ1þtáA<ðFs¨¥ÃƒÆÊzq¹öYe\/.¦­)~RÛÇ,GXm¤Un9U8êâ3Ÿ)÷ƒù”럹ñgï]!ŒLp‹2»^þ8»Å½cÙ[iºïÓ.äÝî*N¹i¤Öi¯p%.8¨ém4qÅIZ #Ðë!9⫵øþ Iè˜Eðµ¾¹&ߌ¶ÃÂØ~¿¦U(#R¨ûƒFEiiˆ€ˆÂ{n!-) ó ʕ۹ÖtŒ~•¦ì³«4sÒ´eÁÕ}Ýŵ¯[©C’ í¥°~ñR‚Aõìã®Uc¨UÝ”ïÜ¢»¶àƒEâËó#;PAâ¨°×æœê·ÊáªmËâÞº8Åû—å-ê½Õ„e¹Gˆ…dàeåƒéÄžç×ÔkQÃÞ†ö/¬–“x¤Ãé?sáórBšºXn/’ÖNO¿®®èrÆRæéf±ÇÎÊÉÝG\ßûV°6ú B$j$†ÿ´o0|eÎIB)ªF2¤¸• =€í®¬˜°6ìþîåÎlÅÇàœÌM¿Ö…q ç™/ÒÔëIû¨æPÑñÀÔ1¥qWkĵTÒçgù¿àµÕ¿îÓùjuE}hˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ~/îËDXƒú@ÕuS<%7Ii) ©îXØ`‘ñ‹'IGGÞoƒ,ZýË÷¾ ¡lÎåÐò’hÁl¥€”¬ÿÃTô ¶º?dÙ{ß(Ñh‹Ôp’#üýsaÊKéXȧ×Û ×jNªùˆfIŸIqØõ–Öy§Ñæ}Ñ€sþ‰²ë)1YâA·ôÙ]e\õ:Y¨1ªÂ|æå€]/$‚¥¢£¨kÞŒÅâPEJÃŽ–Û»!ä%_!ΨMAš´$.½áÓË*Cl'Èi$’H÷ÉÔo‰n=e×KX§¹(ýÖÜ)NSØžùÔ¬;8V§ÖK6Ü?ƒ¬¥çÞ€Êc¨8yyTdœáe±óuÒ+xÃÖ¥kë×…Yè´Æà\nÈ+yÈÑ&'Îw*ÈRZ–qÜkHŒXV ¶·Ù)HR‡A’ÍÏJ®¦$6L(7#È{')p‘ÉIÁ#PÔ8ê}µ, ,zÎIÙØú…Á<Ø4Ú+Q’àJ’ì…:â}T¦Ü'ˆ÷ì5»İs\°ï¿ÁuÞÔìÅéš4Ú¬×Þ‡+ÉJs…˜MjOYjØy:ŽáÙwu&¥½F‰Q¦Õi®|LÃ×£° ÖÛ‹i/qÚRK}TT.÷£ZöUjnfÀMFᆵ¼ï˜q ³÷3—‰$rõÖ;вàËR2ÉMýXt6S>๳+•ñM«-uG½R•¼¬¥ûŠ9'Wa›gæTæÖÓå;ctnmÔòi4¹¬ <¦ÂA@' ?˾¶2Yl½eéTé.ñ¨LmE²'Ü5JÛ¡)èkÈø…µ8GÈ„‘ȯ=¸ö:Ñ¡'ê­µ¢Û[–€lÎØÝ½/ìL‰»ç¼ë¸â[ÐÒeÉ“™i2Jb0ø5ü|©JäSé«Áë]PÅËÑ¿žýuøÐn^îEªZ›áÚ ºY1dܯ•³wÜè ¬,¨,+Jâ¢1ÈžàFbGÃ%(Ž bÍþ ôk6êò©õç{ ä¦ÈE¬ãע縧zGš¥2µº¼€àäWÈžÙQÖ’T¾ÆÞ«wŠ=Éqº– Hâb%ƒké²¶‰¸)@š›±rp.]=’§•%DržürT?‹U'˜N£V#Õorží—tàeÇjʉ ù+u˜D)ç ‹i9í¦¬K~ªäå&±p>„Çj¾ûg·÷Ä6 ì?­÷¥¬)"dš/Hj;µ'ž†ô´Òùº¨­‚AKÊNRÒÏe` JgÕeíY}P¤ÒòäÛ.P«Óu2 —JÒráöεÅê­¶›kìΤmË’¹MfeÃJ·j7ƒ3í¶©Rg´e…p/„`–”Uó‚ ;wµ62a!Þ¡ $åÛ[¢.ïîý­µÖÊ6úê¼®¹¦1©´6$7@§^XI¥ëÇš0ÆXEhfL8•Òêªm‘ýý˜§ÚôøoýÚ¾šn©: %¦›ñQT¬‰RJc©!e¦H*í ŸM³ŸÍRÇ,åÉ™<œ@\êÙ1T>"ɲ]Á„£¥ýÉ«¹Xôv¨”ù ¼ÄNU‚NR·¸à}@F¹ÚæBdÁeÄœ¥hþ¼ò÷Ñ¢#DFˆ"4DhˆÑ¢#DFˆ"ùû…ÿ²tE¤e8ÈðÔ³iŠRƒ5íãžÓ¤zñÏaïß[‡ô¨µjýRîR·MTæm¾§m8Í©¸®2%6ŒýÀ”…ƒÿ·\Ž=´Æâîÿ_fò”e?’T«³~ÝRUÉPÑR½OGãÃ×^¡÷/†Éö¯Þn$ùÌ8“ÏäBT’}òƒØÿ=h)/WiT eÚ×N–½e³óѪ"+€z©$àÿ†«Ôp[ÀC‹igÕi´«â (*!%~€gÛP6åvþÒrìÆÔþ¼]uH5UÛðÔ¦g˜.¥¹Í¨¤PÙ!kO RÔ5aÂKhÛÙ\¨îí쪭B¯"ØÃ±bÓmFV>`…­$}ìc¸Ô R.Z‘ÍþJS„¶d,”gR¬=EŽÓ1Ö‡+“ò„¡ÿL«&J<~²ìܬEBǶêSßò9OCÝ¢˜e®jÀ÷Y)À¹ôºÒ ‚9J2ïY€um~ ñí†ùÕèºn döõM6ÕÚâëUBR‘éø!¶ÏsÉÐ’}µ-Œ–A×TlÝïÞ£¬­†?CÓ·¹¼TiÔ·†&þí–ö1¶–M¿/whW S&•vP£ü 6BUܦ êðÌ%6>ðqhO¦Òit|ÎöÛš‚ø\1NÜ•é»ÀâÊØ}¾UóÔ•ÿ@«S#¸& J¨£ÚÔ0C2jŽ”*jò?¹d­*û½õr:Hc|F÷ùr©&3Ù…­óOŠ?L2SÚ¶6Üßsmèê-ˆûkk±E£6 á2Cks¿ªñózêwª¹ªíI+õ¾)ù¶»Ò—‹l)”ÄÑìͬ|:Œê5fŠŠëMkåæôYI SŠGbTÊ”ì¯]gJ8K5œ@íÃâ¨7X>¯N=A\wM‰á½6¡Šh—O»æ (S$ #àæ(Ÿ•ôpJT‚{“ª¦Ï†=ßЧ©I’sþƒðþ+t‰µPš}.SR‡2€µòäqŸõO}MGhTuò ŲJ¢xÃV+ß ír¬üé%5×ÙŒqG„ ¤†[F} à$ØÖ'Å­%½.ˆp­ý>³©›‰µ·OK÷ÌHuaª… ã¸`9ÉS©Ê*ÈQ ò@Ï"ùu5$¾ª§]zÎ :¼Oz™áóÖeÉ`—\•lËWÚÖµII(n¡N{*i@Ÿp>CßÕŽ¡šòÈ #ÚZI(†É:Ò>“ÿG3s÷!0•¼ÛŸ.Ñ[¸pYöU~ãSDw-£ä1ù©n¨ݨپÑÕw¬wûVº‘¡ú’£ÆzÛƒ¸4Ä-µF§ßÉu8ä…ŧ8{|®ñ­ÊH£ìPàš[qù&4oÒG¨U7m ·úr¹ëÖèò¥L«Þ+næ–u–Ò£¼gO“õÕÒQ9ì’·ú4Ø6’´Þ™'Ü"èÑFùM¸p¥¢Xô‡<×½x¦K`©_š{êÓU‘În¡zc|DÌÍÚ¡N·ü7r}M¹i쫽=·]Béh¯ÝЦK¬7i)R"®RÄežå8Çîà€Ez‰&Œ°>Jå,2 =ïêe`ڱᲈŒò~:Vì™ ÊÖ}Vã‡Ð“ëËß^2²BûAÞïó^­²YY#ÕÒÍZµ=ØË\†Jþ'ŠÐ{| †ºÇ«…V”ñž,Kâ½TDûƳ1àTRÃl4Œr@»ûêBö–»X˜~)ÃmÒÝ¢Û©“!µ³å2d()*²œÀë—X;8Gб zÜ.¯W@·=6Ö-èÕç~‘~S™’¸qL7–O–âÇÓÛ:êÑ—›H[¬ýË\%8˜Žñumk[b¸»ëG¾^¯Ýq¥QUH]·I6Ým²’˜ãY /$¼”œœw×xú¡Øî÷çuÆc¾!çðP®ð_v÷ªËncÖª5¸'Gï©Ri þ|¹2 ëÇýß÷zÉZý bÿ÷¬Þè£/šÝër?ÂP!´¥†ÚJB©íêuÓ^MwèˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ|=ýËŸ‘ÿ-`é) èËcX 
fVôT´ú§ÿÌêh¦ÃõÅ–1z#îSŒ¶Z²||7B;yWE¤Ô³ø©Q#Å'^{E1CÒª˜ùà+ëzPƳÈ=1o E›À¤üÒåå0·i¥¥js Œ÷ÉþºôïÔKã#×Å⊫ˆS.%jZ0I'ˆì‘Û'Q­}RQ/VÛröëô“yEŠ–\E%µT ‹q(a¼AS‡ ×X½—%˜‹m–XTF§Óã:Ä™^RO% %”(¤P?xè}õÈÅ)õr|WY¢"æÞäÕw%aÊ“R$;)iò‘%N(yjü@β",:±Þ±µÖŹqPîŒÊ”ªŒ¦ŒÅù|]êÏ•pSŸUÃXð Î"~«ï\ÏNLyqÕ6¢<°2€}rOs«÷Ù%+Ûá´v‰°§Õnì²õrÙÛI®ShtÔ¶—ÐÌ–ÁÃí¶£‚ë„yaGîù¼½µz–(£9‘®{™RªœÞÔâö¾õ õ)úA;õ¿ÔîÛˤìu /yì5nrv³Pàr„Lžæ\uú£²=ŠÖNs2ÚÉG[?’š®ÿÒF‰JØ›|ZûOñ[¬ºj¹+K¨nÛbxHLhM¬‰P*øïû§RW²£ó2Å´ù*|Õ…ÕGŒ¶ç·p1mîVúÕR£³5L*5¿GCö ©\"Ç@?»–ǹO¾«´r×ÅÕ‘x£Ùâ“zÔè2êèM­J¾¯²©ÞÕÒêju¯XMJ¥jðã<¶<¶Ô¬öJV¯ºH8^µÖ@î[²n*µwž»³{¥D»ìÙ¦ŸsÙR۩é²6œB» ~d`œƒê2=õ€È±±0¸á.+ú#êGqëÂV½q[.H‡q)t˵¨Ç2Ü·ˆÒŠ€É ©¢¾>‰õ×MÀ%ÄÜ2\ÌGð¾j†x]ﵡµ½qZÈ­ÂK5êBÕBb®™¨ã¸’—Æ~bHsNú††Öõ5vê¾\¿'\>=½Tëà]vã0*ëæ†Ê]/-%-¿açB³Û+o=µ`áÖ%uVWU¿m|:7§l·*Ì»öî±hG¼é¦­Eû:áñL>Ò‡>kHHí÷Iýâ1ßQÅI{&ºÞJÈóŒ…íÜ´¢âƒaø÷tíÅÜê chú‹Û^kKQá•>Ð$H„•%CYOv¼ üÉV8¨ÌãqÃ#qøö("¼~’7ÄÏÅ#ôûà#°ûtó/êÝë½u8ßµKSÜj,cê¥í­n¨gÕ%xü5B ÛÞ²UŸgwæ¥j‰ÀmM*u‡±ôÁ}9DiEê ƒG]…@Ÿ–\çÐÛŠîK\ó£Ê,8¼-üißÛøª;Õ^ÛÕ\9Öírꉵv+Ž‘fX..Ÿb•Å!3f€”µ ìN©Í>vÕúðP‹{cdìÝ=T Ëu¸Ò~WäQ)¬y)qXÊ Î¸G0={®MX6,E~ÅØŽq·ÍJuùgس¨òÚ¡Vª5ï#Á—=.G– ’P‡™¤´ zñŒjbn¨÷#ÂgÖ&ü¿Šz—¼%ï…z\—ÅFJk }‰Îü>9!iH¼‚1Å#¹ÔPÕ•ºÉ:ÿä®(GO«©Û½Ó*¸[ͳ[QL½#³»v݃)ö›PªÓßûº’~hë[£Ëu+@ |Ø×q¦¨)…ß/rã´p;êp³»ûÕñéæÁÚv¬æÚjSÔݪßËYZ%-rVëT:ÊS2œ$an,¤öR€×&ºž(ª„Y®ö‘›snvìÊ˽A6 Qâ»¶ëïvíUv͆äªÕ%@muI@psøj)sú² â£Å‘X\êºy¸ ̺x£“€05–Âë.{m‰HÕ M¯±KØD…T™1Ú2¬¼³ÄÀŽZä;ãëoW·m Ñ þع/C·¤Sc¢{”jDX3a6Ö„J±œ‚£œkÒÍMxƒöe穪¶‹ç}éñÒʱó“)ùî¦ILDjYW8­ðY)]ÀȵµX½kY×mÝê4êê§ïRO½QU¥mÆ sâ+¹÷]ÐÃõÒ“”oð»®ÑTäøiųz³'ýÖÁo5‡\EÉhS§6nS Z21ÈGàGq«MÕ^p·¥l°"4DhˆÑ¢#DFˆ"4DhˆÑœîÿdÿ–ˆ¿ŸÿÒGÒ÷Mì¨ï%G–}>óc¹þz˜õ¸þ¸²‡ú£úæ¬ûKøäß3›PX£ØP‚ÔpTÃ~ß×*ØúMS7!ü}"iõ~F—ýeQ[ÁÝ}ܯ|C4ã’<’`F3®Ãä-Á|ź݌Êëî%tM£Ñm˜”ÚžåÝhLˆkœ‘¨TT徿R{áÕ:ʳˆpÿä¬ÓS¥é/føª%ÔŸY[·Ô¶-íÆÜJ×Iƒ+Íbp˜Ð–¯\8Ž_‘Õ9%ˆJgwW Å37ÅDø2. àŒÃHZ›O5”!JC)„ñˆö”³¤eyU ÓªÌt¦•Z8¸9º¡ê¬ëQldÜËì¤ùV¿êý.;‹TƒÓœ8¾ÀþZØp¹v¬—´K†E6<„¬ŸÚTëß[ ×aurÙ¯*ÍýÄŠ ªÆçH#·$·1²¤“ïØk¬oú¨.AgYà³]n}ŸA¤%B;?*?Œ„öµYòÚVÕòèþâè£n™í=ÈÜèUN¡÷î¾y»-"ÜÁ!´ÉN<²GÊHR”}pØÆ¦¼`͇7UË[#¾›ëë‚AêçÇ;¨ž¯h‹·)ÕÖ6gnÚJšfÔ±À¤4@/ÉF¤öôäT{ê3•Ëhf8# ê‹Uë 8ì:kœ}_é¼ÂâÞÏrûúû‘­-´­2ý¡)¦NB[mjTGSœ¤”+¹:‘½bQ˜õpó_п‚º£î?…–Ý[“œy¨—3í/5¿¾Ó®´°ÊÆ}pêóZ¡=†"æê®• ¸{Q>¦:Ý¿ =Õ£ÝvzlšÍ…X[Ô›“Ëø„L¨ò*LI±ÜÁC‹O> g>YÔÌØ”A&Î'ÉYŸyGŒ·…u»¼|Ú!î~ÛLyۖߎÙiöÚY~;)<ʲèG¯f@<8G%dý&å‹ßÚNU2J%RS2C˜%¬8œw)'ð'×Tµ„⺚±ê“d®_H¾)Qjß«öÖöšõzäµB¸èï¦=vÚP?³u—IA(?xjÄغ»þ*•M%Çåo‚Ò»Ó­ù]4Ûô[¯x#9xí]yij¼+"Sí8 Ezœ0XtgºÛ +=ÀÉÔòa±8ŽMðU!b1a"k¿ÖJA´¬Œñe›Ö×·»¾¨øSÕ‹Ib u„ñø¨À%ôìëcZb;Ùl8¢>N ž«¼&Ùº ÅþËïš}&½¤™â“{F[iZ’°¶ÛÈ V 1Ì ñÕZš1”G'W`¬ -­Î™ÝKøhîÊºŠ®À§ZÖuíeÔ!Er‘_z·<ºEQÚ2{)aK`¾\ùJ~隸•º"¡åÖBÍ›gËvþwºî†˜§‘½!?‡ÒÛŸ{¯õ©R·+r,›~cŽqŠÕºÒêuP`–€O£œ +X¦Ð'ٟ֜³5Nš³§víÉZv:nØ? «-MøåÑqm"àÜ—DºíP¶2DJ`äñVSÛ#ñ×^šŠšômâë=ló–Óø2p^+WÅ_ «’52ätíöãA“>S"‘QˆµÆ8²¾IWþú˜?§GõÄ¢2úâ¬%r ™.]AÉOu¥JÛ·Ø#åÿ stq éšÙ93|—´Ó8ƒÉ¦Š‡ýeD¯ñuÃZ¨Ü–Ìq÷¥IPõÆÛ]rË ðÀ[D³ãÆ ’õ¬JY-º¨Pb¨9÷’Íì¤gÓÛ\Ú€¹óW)¥ÙõYîJ¥›x6‹‰uX”êˆKëŸL×>©-œr8>¹ÕA# #½—Bs!ŹvU/Ë.¥Rz%œŠ‹Öã†Faá6i’µ¶29Æ3« 2áô–ü”ƒÊm¥–K¡%¯ÙH~$Œë‰g‚ùuµ¼Ï ë’Gr‚Gg¶A''újq±z¹]q¸ß˜Ëë-¾a RChÏÊ0rAƶ~®%–kì“+kÔ•5ûKôh,¶”’ßëåO¨,ÅHSÅH$~ZèÉa§Î꟱–vÈÂd/8HIÁR¦««b¬'I^{ïÖµúÉdíótk%_4›æí’Ýße ¬ƒÏñ/ Ä´—÷Oc¬‹µ÷- Q?_%/Ìé{¢¾ó÷˨ ×Q—4WÂËÚV<š:V€©:R… « –x,àI`mùýv~l¢ÄdÞ&úçù:…:Ýñ·úžÛVÙmŽÄmÆÇmu¦š›),*}ÁTu-­´ªuIÌ:øâ¯BsŒ©|SBvÃÁoV}a=ßëëðe lþÌÝ{ñ¸ÔkÍ£®¯s\.¬AmiO”•$…ºòÉâËHI楸R”%%J! 
dܰ™ €qI¹–³î‹×xeôÏhì~ÇÇ¥o¦ämüf…Vép‘gRªhl%÷¢¤ñTò¥Y ”áAjîaªB&hÐé%¨'˜²WRÈÜËÆGÃâ÷% ¾©ÿ`ßÔhEA›>¾Ê93%-‘ÉœñYÎPê>bs«pHöÁîU+€šAñYÉáùÖÜߎ­UN¸ŒV,›¦B)•ù °¾TZšVÌ—J€ mÁû5ۂ׫–ÙUN"TJJr Aûê"/´b’}Ñ—ƒ¬Ëy~_VZA'÷IàNú§ê®†õv|/üTaôõQ«mžñ1:ïéçq›T;¾˜Ë|æÆPl%™Œ«) ¡A‚HÊQõÕ ¨!ﲫ-ɵÉ<|F<7êž5ëSy6‚â+f.%!ËSr­ kᵈó‹dÝ ä ²’“ó%IL³ã…­f͸ÿZBEwŽLîü~^úÞ­G„/V=Vu{Õ…·³÷~Ÿ{Ø2Bªu‰µzK2¥&HR–„<œžJP ¨wV±!Xqšˆ#<sLž¤<÷?mwviŒíFÊÖ X—ŒºCN¦¸©;.–›ZÀ 8HFIÈûØÇnõªjˆ*5bÙY\§£ƒYw¿ÁA7‡éõ9½6ÝF…jUl=£àÇ4 :€ˆs$µî€êÔ¥Ÿªp~nß]W*¢e#QD{%Ÿz¢;•qVﻥÚÅ×U­Ü5É$8üÊÌÅ˾_7r¢}r{u#‘>Ñf¤&ê­)ý~¥¦ÃêïØIÇh7ÔUרÈ^)Õ(éÍ׿o #ýMo `>õF°6uœ“_u7Ñ^?µÝȤ4‹^‰]–ÓGÊ ÆÓñÍvíÇáß{·Õ ê %ô{íâ®Ó8›îRW?NÖçN]H¸½¿ݱê&'ë½²ë'œòÒË'#’Ô§è—@×7L†³•-bïúßÚºú&mô~,ª*¤Í:$T)+ŠÚ”æ0’„Œgˆ>¿Ž¹åö Ú¦Ý)+!áÓµ4­Ðßú´ê„ϦZ”¦åŸ_ :ê’„¨ž]Ö³¢‚óâÊËJóý_ sW‡G…¸íŠ]-¦ƒÿ"r ?¼~§]Ñ"\w]–•"›Rtœ•Uä•|öÐ0çÞ·1ÝÜ þ¼"ªWRPTžJnÒ6۩ϼšô¶OõKƒ\Éžú~–?öFÿ^æ› y,¯’ÛôŒVð†?ð­¹én²«a-‰¿'“"+cÿH¶ž9üq®Œ/pb^ÆÄ¤-JµFˆ"4DhˆÑ¢#DFˆ"4Dh‹ŠàqLÐ婊Òʈ#ØãDX5úAjûBÒu /v_Ql~ù&rVójÄOþ‘ƒëˆ*rý„¥Ùø:–^s—‰çTRа´7M¥°HöW”AOáŽ:äh|?¤«Ëµ—Ð:JÝÐ1Úç+üŠé¨3Êò‡ÍYK9`¯núí›mx/žT‡µS_cüFÚ<¶ùHBÝl”öùA²{ã‘Õ:Œ½•Šl8IR+‰Hû’ÚaÆÞ_§~g `5͈­;5Ô1Ç’hÈ š´àÓžë)*GÀ4§„§¹^ â=I=†¯*eínKTû–¥tT¡Ã©=õC-H KHq)ôJˆÏÌ3ê}u™@_ª€øWÅBYC&™C-ÇQ#*RÉ÷ÏÓQ†%±d¾O]A™;ç´†©ù}Ï–’3éõζf&Ä´}¼+A™B¿’‘&ÄBy¯(XX¸|”Ò¯[QzCK[I¼§õ¯d®¦—N§\ulJnˆÓ½¾Í©$þˆsÅ.÷(ÀIcÂY:ŒÃX:ÁúíTׯÁ–©Ñ%~Fâí|Yw>ÃÜÉïFX–í³Ï¹mŤã?#ààŒa@A4îViªñú97ª¥qí/’>èq'9Ï ?OÈ÷Õ!œ+ ­ï†·‹æåøz™C£.wmýy f±e\Í´i!D Þr…œñ8W¢µ8¶Nþ  âÉŸ½l_‡‹VÅS¤Ré;K²–~Öɯ¬®¦ºk­:㉠y|Ù © ã“Û½b˜˜H®ÏáòT†!)¬lÙý;¬ßMµZz½êoê ½.«T†ýB;…%jê\’â}=Bñ‘ü:æé|AR$K¥¢ÌN•ð’¥jhµ]J–Ìús¹ öR“‚’>š­fSî$縒›„ÎaÏ4TG0Þ3åº=PâV¶‰ö¶–Æ×KB?E“§û×}Kv$£áìݳ¥>Ì™‹%v\†V–“Œ+*R»ü¡HÏÞÕ˜Cì®}YØ;ÕgëO{cu3×&éÞQiÚ=^®Š\'THaÖYG’{Ž*âÓ Ö+Ì^nVÉOJÑ+‘ÐÄjlj_‡ÕÛÒµ]ønÄ ß›QXó Ž¶ÃkQ‘Np‚y üéOãÈÂS¤2ÓI¸þ|>»V¯1Å(TGÁV»swT¶¥@xù \ 4ì© 1$¥]±Ë××—¯¤?5/Qz:ró€×GÇz¹žv¿ÃíçVÖ¥­éU8´¤-H<’Z2}ðPu{E…±’¡¤¤¹€ðVÊ“Ê<ÆÂ‡Ê‡9‡lL{ë§ÅPt‘cÏò•uÆRóð52xãîrJÔr}ý5Ñ©ðõ;”+×Íê‹>¹¿·K… U—·¶Ìv3íæÕ™|ù…“ªl'H6wÇNvñe쪢òcK Šçwÿ€]¿´ÞÒƒýíÐôR(ÊõL¥]ÿ®­Óý÷/']ÔÍ« DhˆÑ¢#DFˆ"4DhˆÑ¢#DFˆ¸n_ú†gþ ¿ËFE‚þ:#¨^‹©ÑR—wt„¸~B¡&–\êzP'Òaäÿ0T+ÎÚ6 »?OÞœëN\÷²ç™ÅÚýwpfEŸ#SŒ5Ù´ ú$g¶¹=r8§²w”×Òü©Ä4ñh] ^¦oÚ.²vÈHzöž¢¯‘Šc |•vÏy/›1XU;ñ–‡r¶õRóCxºÛA}Â[À9#±Ïsª•¶ÊZbÙuJe[rî¨k§ScüDäÆTÖR’¶Ð2¥ Ÿ ÕÒâäº"cƒ’gPntu™Ð*@}æUºÁ⤶®ËAíè}­3ªîë/(ôT=OIòÛJŠQ‡psîqê5¾ÒÓúËщ Š¥4^l$¤Ÿ\ÄëL…lØ““aì÷ƒ|,»4ÅEzþ¬3o¾èîcÅu_´^‘©Bi4|÷­^\òrÍiGˆ_ˆ~ÓøfÕ ôÜ­˜ov-¸ÔSîšTú‹,% ã¡dJRЇ<Â`zò¦Ððe͈oM{;¬£êÓoö§a÷™OÛíÇFííe–ªj4ÖV¦ÆZ”M-÷AHut­IWe`gˆªì,XovWb#!Úk'Þîx°Ý·>ÄÕö‡i,k#b¶Ž³1*´[vž™Uk…¤•Ôªo…H’²Gu$¶1„ñÀï³ÚÍ“}}sZÖ‘îÿ_\•NmB> 'åo°)<‡ãÜj52ìnž•H©¤)aùª>ùõǧøk"+lII„–a»2CªA(ÀZ¾€€UŸ_^Ãë­¬¶«’Я zvðY­owPËsh:dµšjH~ºê ?~ÉìRÓ 1ÆGÞBU˸FO.:Á b“6úúù-êjIà >Eõ~ÿ«öêø¸> }\ñ¶{pèlQïjb)–Ýj€Â˜J}i‹ŽB\Œ¥ ¡$) ûûëª@X}»,¸ÌâúLó»ßŠ­žþ#1º6ªÜ{g{PRm‰óþέR|ó Èi^SËCg!Lò“ï®u,¸‡S'Y]ªƒkY³NžŒè›#Krè [ôÊÞÛÝO"[Ž(¢7˜Žé[dùÆ5Ñž—X,CÁP†³VxIT¾‹¼TîO פY·52váôïWuy¤cÍŸh¥y 1îRì|–œ!${¤Œª2‹fEjzqœ‡'Snõx9ôÑâWµÒ73§ ®›g¾{¿2‚Û’hIZ€RZ©SOúU-ß@TØ[^á í© 8¤ÚÝÜ©DÐl“_¿ðu—}]xvïA•Ìßö›ìМX.ZVgQ* ' SrP8¤«Ù _ÕTä„ãë.”5K–ib7^U•VblE.CpßnXC %M© `¤çŽ#[ÓËb¢ŸŽål:ˆÜ“Zë6Ý»˜Å5‘iÈ·EA–¡ ù‘Ë\Ô©<óøk¥¾Í¤+½­îMa'£™²¶\]QÝÅ…P…t? 
ª’™TÅ.2Ÿyƒ$rÈZ‰ìIƨì\×BMÊcè ÍÆëÎê‘D´#}›iÀu+¸/ ÈRivã8ÊÈQǘñIâ–y(¨}ÔåÄÌ9ºˆæñZ›âU¿–7„‡å7§«ÿ›/ Þ˜iì4ó„U#D‘ÄÍ®Oi÷ü¶ÐÓgP’¤Œ$ë¤Ä0!ßÿšãÅLnR}v,„²-¸U± ßf\õ¢L¨¡–¸2ÜäeYC®Qž¸[>+¸îEVé÷: êÞÖÝ)0$ÔFò*´Ø¤5%†ÒœüÉVT”ãÕZ¹ ˜å,íÚ ©ƒÑÅOÝOm]»Gê/roµ¥WàÙ•`‹š¹A­SZ·S5 (}l¨e !\sè=γÒ*2¨]mÚýýÊ= Wª†mÎþåg<=ví;wÑm®ËIæ.s+aTúU!e ïõIj®ŽÕqsüŠó½C%.¥‰Jâx‚€ ½È羬ýÕ¦Õ »W¤ >3SÐrµ`rãÙGUŸí˵•Áû .N«÷‰ÍœýÁao ˜¼Ê/5RXQ E5Jm´§ß‘\lþ:жšŠ£ÛŠAñÍ™zidÖù<ó<7óZÐv~É,Eð%·^²¾3¥KmI<Ùi a•}[lðO|ãRÒ½âädë:œµej"4DhˆÑ¢#DFˆ"4DhˆÑ ËÿPÌÿÁWùh‹üs•ö_R]Ô °Û[š•íƒ6Ÿœÿ$jÕ+ÛJCõÅ• Üè&“§®ß§ì}öê&ˆÈKqiõ¨Õ vO'ÂÉ#óƹ: \bÛæ¾‰Ó¢ÖèÕ_2Œ›Ýdì†èráª¯ØÆeÿ¼œë¬]r_;l€UAñœ§¦D°Q*KŠfjŠÏd °ÕZϵf¯QO1 ¥Ät¬!ç›SKS+RVa‘õúj¬áµ²®BD›i‹ðùmkBŽê Æ@ú÷ÔÍ‹ª´&ÒÜ5<”§‚<ÅŒ§—ËÍ?Túç[ m-L¶p¯j|‰_fËeµ!„º HR}ÆO ÆµÙuŽ áSÿƒýƒèñ.ÛŸ9—Ñölgêm6ø -pÂÒA=†}N®Ðáyqy:­Vø q梯M“xx¹ïZÃË §ÔQL JðT†Úo#ò*'¶´›3uŠVô,ªÖØíÕOu/ÊMµ@Mb¶úcÄaùŒAejî~wÞZl;©jñМ°Š˜ÌDq·rús郡2dï>á/¨­Á„´¨mÞ×Ï1mÈêJ’J*7Ñ•‚‚âTˆL©iZ1æñ<†Î,;óìúÿ>Å3}]¿_äü×Lj¿IÍK`¶óª= ±ÿR¶or›û&§@‹5ê‹v…iŒ¥ÆKÎ|êmà‚¤©@wæ=Ò5´À#gÎËH%&"ŽMüA‹ÇÞC rC¤aIî8Þz©Ô"×%böK§e.üЏoGC´·š–Áy êÐr AùVÁÁì}ûj9VX”ÐÂRúªj%8-R¿ N¨COG‡—j¹%Dò—ðQ–ðøìP-«j=™Ò¦Å\Xº+T4Qíº"\R S®Ü;!AA^¬‘­d­Š!Â-~Å´z<¤-§ßõ½g‡]Öey­íªß“gÜ;‰:¯%Un ‚TïÚ-¤©n…„ +äJGd„ñöÕJIŠ¢ŸY#í7˹tªaÕÍèÚÁn 1¶w)ëCÜí©Ñ•Å©‰oš^0HÎ}q¬bB£c&Oë6ð“d×cN…%+C®¦z›”ÈXæJ†±@v7%ŠðÆ+[wVú»zîè~ÝêZɨ0‹õËPØ››žØS•ȾÙerÛ „™ÓægžlÚ„âb®×f~Âááo‹®I¹êä+ZÝŠd²møöÞÏÙtØqþª )<…²AâqŸCÛTbêÕ·=g¤J2#ù8s)Éù~µ£ä·®Ó¸“ú»½”©€ªRiÕýCe)JÇPJÛ`JÜX^'Õ õÊõ‡­šchqÕÕ)"œìpGc™Ž­`ýqÌ“øj´ÙVÓL>­ßÃü×¢£ª讓§'¶¸ãÃûLÍ‚Ü~”vý½­éêÕ¡4-¸0~% ’¢N¬À…y£{º‘µ2Ñ"4DhˆÑ¢#DFˆ"4DhˆÑ¢.+‹þ£—ÿ‚¯òÑø÷ÝÙÑÛÀ|‘÷1°Oâe°É³Mÿ©Áõ났mŠ–hùþNž”u)=tuW¨)˜ÎRHô¡y×/C¹=^FËÛt£>‡ôk,ðÉø'U=ßùÖ¢¢°Å´ã>Ùë?Ú/ ê*‡ã%#/hp'âA^~$ŸË骕%é@{ÔÍ|eÅP.H¿2¤ÆJéZqÀƒœ'·}@c³Ú¯0“®X)b¡pDH“;RÝ 9!yR#°õÖbÍjy.ÈÑÔóÊm‰-¸ˆáAs†„à¤n̰_u~¶–SžHòÎ\mkùüI>Ú{KN\”ùà¡{5ÅöÊ¥¥Jòn8ÑØVy$º¼)ÏÝNß½BøMÛ±T­D¡¿{"£gx¬ïóâ»ËSºSÏ€Ú'Cy(-<Ù' A@ô:Òm™HV(þÈ{PTtÊFT†œB}Âþ\ãêüu¥ZJV­¥>ý¯A·mê{µ:ÕmäCƒ<פ¼µ¤% ’N}ôfŲ+GqaÄKjz;ëÓ§þv›nú5½~оë3e·šÑ™¶«Ié[NRÝ%ÀãŠeÇ|¥¾Òx¡iÊIâqÑà ooŸ%É8¥’õ»ãÞ¡¾¼ÿGº¿µW=fééÙ2okU…8¹–\¹Iûnˆ ÿåÜV¦‡±ìçà…¨òÔ z54Už¬Þõšw\YVâ«Q*6ýJ¡©Áv+íc … ‚qßÿùª5ŒHWR–A©o§m¾z«¿cÛ»UA©ÝµiŽp#ÅyÚROÞ~Kî!)m(7psÇÜàdT¦obo¯Òš¸bW[U³ý-Ô<;¼?ê{]°ô·¾ç\1—OªÖ¤ÌE>*gËBýMn¯æøxˆd!¦’ >jÎ{“¯FÞŒ0Žõæ^ÒJòIòìþ*¤Ùžû­C°Ü¦ÍÝý‘›_—!RÖïN,®ARü‘’¢{Ÿ/6¹:*s=tfÌë¹K¦éB-L‘—~Y&wN;m{ô+Ô¶ox¡Ê°jr箹mOV%Q¤>R®D’“…•6Û|SŒç±‘›Ñ˃ÑÍ¿æ¨ÔF26º1É÷Û‡‚¾×å4uQ°¬ ér«"2‹ §Zİ”c;‚x“ïÛ]ˆåYòâË‹$dLjƒ~ë~K8º’éþƒ·´Ö„”U[©=$ùÿÉl”ŽÁ }=A:в”›jì­QV_gjµÞTÛq—ƒux®OuK)âáâïØÆ=qN"ÚuÓ Eú©‰smõ¥&k‰ŒDˆiO)|Ï®9;ãñÖ†L¤a|JÇthØwtù3m£¢ž‡‚Þ?gË+XËg˜wÄcìPW‘Äøs»[µr«éIŒH²uçÕU›nRºFØç&FV]»S¯Q98”…ùQÞå¥ÓƒŸ¾{ßA¯4lÐO$|/ó^ŠyÀ$-öùqQÄ=áUÓ“&‘6˜ÚØHeªzÛPp¨§C`Œzò'Zcû«|.²™¶ž:îí´©ò×^§ÐëªU£>7 Ò’…y‹’àp¥'ûîÉä3Ç×ZP¶rÂ×u­d¤Á‹¬¢n¦v2ÓØþ ¥Yv…òwÜC-Ì‹pH§Žr)ä¶Ô×%Üc >º½X¢m¶w¶vÜΫRJ/°ãÙÅû{ò‰”5üσ`%n´>ií’œØãÓZÓaÁ²•8˜ÿ5x< :çÍö¡1ðF}³{ÈE¿pR¤’ˆÒ!º¤à§¿…%*Î ùqï©àªçïeFª˜¤„¶¬ë\ºˆÚŸìÏz*QØR¥ÕT§(Xâ…g¶I@>À«óGèÄ•xe"QÕB¥N¦¨"¡T¤Â)ävHZÆ=0süµ úª\x6è¬SnÊ”*„)mLûJŠèKKo _|a`’=qªÒ…ßk‚µØ_¹F[wl¹Ô§[ÅGYKÚºS4ø%DºYF)HP8W”……ãØêkâmÿ&;*ÜÅ‚ §ݵÿäÿ¼”ÖSžÂ8¥-¤éMXUFˆ"4DhˆÑ¢#DFˆ"4DhˆÑÁÿPÊÿÂWùhް‡ÇÛ+£ô ðôotÙAÏo›ÏέB߯Â_[ÙV²?®iÓB"øug jJ"u'†}Vk=¿®¹Zú~ö›ñ^Û¥ƒ Ý/¹'àžRª‚ŠP¥”žC¾;{~ë>õádSÆÃ}:íüÄ-æêÆóñÇ‚UË(ÀÏ®5O\ô½b!Þ³qQdȺ“†\'Õ^‰OáªöõUá+YvH ¢›!Kçûnç±ì¼÷9ãQ‡UnOµ….Áqe–Ô”:€ž%¡òŒœ|ÙúËVe‡šá½®çqTæÚ·å-ô£˜¾îÎu­·­C-¤—µ»—\é×}l»ÒÔtPî;NR*4ÙŽ ­)tv䤓Ý*‰ïÛ[ ¯c íÍl@8dÜëWi,}õïiSi@ÚTK*á§±åý›vPeTé (úšuR „ÈìvmhÀöQÆu|e†aÙ÷>OõßnåÊ*)¢}›÷ö%]¿è3ÃǨk‘Öl+vŽ*Ðcªtªm¯yÜa ²Ÿ¼ã‰’G¢V?-JFãŠÛ»ŠÒBœ ß¿%x­îfÔxC턾éûmìý¾ÞÍ˦¨VëÜ“Q­ZÔ—†Â”ãªaçÆ&¸„¬Œ+‰Íhú¹;©)ا¾³6YõáA²kߟ}š¶˜i+…ã‰S–¥Ý–C«R¿’´¥ÊÝ™«5E†_Ñ%j¤©×äù”¶¥™¯&8`«Ï!.-)â•ÃÐ t1zËŒÝU÷»UAiÚ¢¿¹ïíí¹Ce_iî EO÷Yü~²ñ²©Û™úJ÷têl·öËcì›nl 
[… remainder of binary JPEG image data omitted (non-text payload of an image file under kombu-4.1.0/docs/images/) …]
kombu-4.1.0/docs/images/favicon.ico0000644000175000017500000001242113130603207017070 0ustar omeromer00000000000000[… binary PNG icon data omitted; the embedded XMP metadata records a NIKON D50 photograph from 2008, edited in Adobe Photoshop CS2 …]
kombu-4.1.0/docs/introduction.rst0000644000175000017500000000033013130603207016753 0ustar omeromer00000000000000======================================== Getting Started ======================================== .. include:: includes/introduction.txt .. include:: includes/installation.txt .. include:: includes/resources.txt
kombu-4.1.0/docs/index.rst0000644000175000017500000000045713130603207015351 0ustar omeromer00000000000000Kombu Documentation ================================== Contents: .. toctree:: :maxdepth: 2 introduction userguide/index .. toctree:: :maxdepth: 1 faq reference/index changelog Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
kombu-4.1.0/docs/changelog.rst0000644000175000017500000000003213130603207016160 0ustar omeromer00000000000000.. include:: ../Changelog
kombu-4.1.0/docs/make.bat0000644000175000017500000001646413130603207015120 0ustar omeromer00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^<target^>` where ^<target^> is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. epub3 to make an epub3 echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo.
man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled echo. coverage to run coverage check of the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) REM Check if sphinx-build is available and fallback to Python version if any %SPHINXBUILD% 1>NUL 2>NUL if errorlevel 9009 goto sphinx_python goto sphinx_ok :sphinx_python set SPHINXBUILD=python -m sphinx.__init__ %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) :sphinx_ok if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PROJ.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PROJ.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "epub3" ( %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 
goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %~dp0 echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "coverage" ( %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage if errorlevel 1 exit /b 1 echo. echo.Testing of coverage in the sources finished, look at the ^ results in %BUILDDIR%/coverage/python.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end kombu-4.1.0/docs/includes/0000755000175000017500000000000013134154263015317 5ustar omeromer00000000000000kombu-4.1.0/docs/includes/introduction.txt0000644000175000017500000002407113134154022020576 0ustar omeromer00000000000000:Version: 4.1.0 :Web: http://kombu.me/ :Download: http://pypi.python.org/pypi/kombu/ :Source: https://github.com/celery/kombu/ :Keywords: messaging, amqp, rabbitmq, redis, mongodb, python, queue About ===== `Kombu` is a messaging library for Python. The aim of `Kombu` is to make messaging in Python as easy as possible by providing an idiomatic high-level interface for the AMQ protocol, and also provide proven and tested solutions to common messaging problems. `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol for message orientation, queuing, routing, reliability and security, for which the `RabbitMQ`_ messaging server is the most popular implementation. Features ======== * Allows application authors to support several message server solutions by using pluggable transports. * AMQP transport using the `py-amqp`_, `librabbitmq`_, or `qpid-python`_ libraries. 
* High performance AMQP transport written in C - when using `librabbitmq`_. This is automatically enabled if librabbitmq is installed: .. code-block:: console $ pip install librabbitmq * Virtual transports make it really easy to add support for non-AMQP transports. There is already built-in support for `Redis`_, `Amazon SQS`_, `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. * In-memory transport for unit testing. * Supports automatic encoding, serialization and compression of message payloads. * Consistent exception handling across transports. * The ability to ensure that an operation is performed by gracefully handling connection and channel errors. * Several annoyances with `amqplib`_ have been fixed, like supporting timeouts and the ability to wait for events on more than one channel. * Projects already using `carrot`_ can easily be ported by using a compatibility layer. For an introduction to AMQP you should read the article `Rabbits and warrens`_, and the `Wikipedia article about AMQP`_. .. _`RabbitMQ`: https://www.rabbitmq.com/ .. _`AMQP`: https://amqp.org .. _`py-amqp`: https://pypi.python.org/pypi/amqp/ .. _`qpid-python`: https://pypi.python.org/pypi/qpid-python/ .. _`Redis`: https://redis.io/ .. _`Amazon SQS`: https://aws.amazon.com/sqs/ .. _`Zookeeper`: https://zookeeper.apache.org/ .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ .. _`amqplib`: http://barryp.org/software/py-amqplib/ .. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP .. _`carrot`: http://pypi.python.org/pypi/carrot/ .. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq .. _`Pyro`: http://pythonhosting.org/Pyro .. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue .. _transport-comparison: Transport Comparison ====================

+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| **Client**    | **Type** | **Direct** | **Topic**  | **Fanout**    | **Priority** | **TTL**               |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *amqp*        | Native   | Yes        | Yes        | Yes           | Yes [#f3]_   | Yes [#f4]_            |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *qpid*        | Native   | Yes        | Yes        | Yes           | No           | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *redis*       | Virtual  | Yes        | Yes        | Yes (PUB/SUB) | Yes          | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *mongodb*     | Virtual  | Yes        | Yes        | Yes           | Yes          | Yes                   |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *SQS*         | Virtual  | Yes        | Yes [#f1]_ | Yes [#f2]_    | No           | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *zookeeper*   | Virtual  | Yes        | Yes [#f1]_ | No            | Yes          | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *in-memory*   | Virtual  | Yes        | Yes [#f1]_ | No            | No           | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+
| *SLMQ*        | Virtual  | Yes        | Yes [#f1]_ | No            | No           | No                    |
+---------------+----------+------------+------------+---------------+--------------+-----------------------+

..
[#f1] Declarations only kept in memory, so exchanges/queues must be declared by all clients that need them. .. [#f2] Fanout is supported by storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. [#f3] AMQP Message priority support depends on broker implementation. .. [#f4] AMQP Message/Queue TTL support depends on broker implementation. Documentation ------------- Kombu uses Sphinx, and the latest documentation can be found here: https://kombu.readthedocs.io/ Quick overview -------------- .. code-block:: python

    from kombu import Connection, Exchange, Queue

    media_exchange = Exchange('media', 'direct', durable=True)
    video_queue = Queue('video', exchange=media_exchange, routing_key='video')

    def process_media(body, message):
        print(body)
        message.ack()

    # connections
    with Connection('amqp://guest:guest@localhost//') as conn:

        # produce
        producer = conn.Producer(serializer='json')
        producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013},
                         exchange=media_exchange, routing_key='video',
                         declare=[video_queue])

        # the declare above makes sure the video queue is declared
        # so that the messages can be delivered.
        # It's a best practice in Kombu to have both publishers and
        # consumers declare the queue. You can also declare the
        # queue manually using:
        #     video_queue(conn).declare()

        # consume
        with conn.Consumer(video_queue, callbacks=[process_media]) as consumer:
            # Process messages and handle events on all channels
            while True:
                conn.drain_events()

    # Consume from several queues on the same channel:
    video_queue = Queue('video', exchange=media_exchange, routing_key='video')
    image_queue = Queue('image', exchange=media_exchange, routing_key='image')

    with connection.Consumer([video_queue, image_queue],
                             callbacks=[process_media]) as consumer:
        while True:
            connection.drain_events()

Or handle channels manually: .. code-block:: python

    with connection.channel() as channel:
        producer = Producer(channel, ...)
        consumer = Consumer(channel, ...)

All objects can be used outside of with statements too, just remember to close the objects after use: .. code-block:: python

    from kombu import Connection, Consumer, Producer

    connection = Connection()
    # ...
    connection.release()

    consumer = Consumer(channel_or_connection, ...)
    consumer.register_callback(my_callback)
    consumer.consume()
    # ....
    consumer.cancel()

`Exchange` and `Queue` are simply declarations that can be pickled and used in configuration files etc. They also support operations, but to do so they need to be bound to a channel. Binding exchanges and queues to a connection will make it use that connection's default channel. .. code-block:: pycon

    >>> exchange = Exchange('tasks', 'direct')
    >>> connection = Connection()
    >>> bound_exchange = exchange(connection)
    >>> bound_exchange.delete()

    # the original exchange is not affected, and stays unbound.
    >>> exchange.delete()
    raises NotBoundError: Can't call delete on Exchange not bound to a channel.

Terminology =========== There are some concepts you should be familiar with before starting: * Producers Producers send messages to an exchange. * Exchanges Messages are sent to exchanges. Exchanges are named and can be configured to use one of several routing algorithms. The exchange routes the messages to consumers by matching the routing key in the message with the routing key the consumer provides when binding to the exchange. * Consumers Consumers declare a queue, bind it to an exchange and receive messages from it. * Queues Queues receive messages sent to exchanges. The queues are declared by consumers. * Routing keys Every message has a routing key. The interpretation of the routing key depends on the exchange type. There are four default exchange types defined by the AMQP standard, and vendors can define custom types (see your vendor's manual for details). These are the default exchange types defined by AMQP/0.8: * Direct exchange Matches if the routing key property of the message and the `routing_key` attribute of the consumer are identical. * Fan-out exchange Always matches, even if the binding does not have a routing key. * Topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available: star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`.
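As a concrete sketch of the topic example above (the queue name ``us_stocks`` and the payload are hypothetical; the calls mirror the Quick overview):

.. code-block:: python

    from kombu import Connection, Exchange, Queue

    # A topic exchange routes on pattern-matched routing keys.
    stock_exchange = Exchange('stocks', type='topic')

    # This binding matches 'usd.stock' and 'eur.stock.db',
    # but not 'stock.nasdaq'.
    stock_queue = Queue('us_stocks', exchange=stock_exchange,
                        routing_key='*.stock.#')

    with Connection('amqp://guest:guest@localhost//') as conn:
        producer = conn.Producer(serializer='json')
        producer.publish({'price': 103.2},
                         exchange=stock_exchange,
                         routing_key='usd.stock',
                         declare=[stock_queue])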
kombu-4.1.0/docs/includes/resources.txt0000644000175000017500000000132613130603207020065 0ustar omeromer00000000000000Getting Help ============ Mailing list ------------ Join the `carrot-users`_ mailing list. .. _`carrot-users`: http://groups.google.com/group/carrot-users/ Bug tracker =========== If you have any suggestions, bug reports, or annoyances, please report them to our issue tracker at http://github.com/celery/kombu/issues/ Contributing ============ Development of `Kombu` happens at GitHub: http://github.com/celery/kombu You are highly encouraged to participate in the development. If you don't like GitHub (for some reason) you're welcome to send regular patches. License ======= This software is licensed under the `New BSD License`. See the `LICENSE` file in the top distribution directory for the full license text.
kombu-4.1.0/docs/includes/installation.txt0000644000175000017500000000070513130603207020554 0ustar omeromer00000000000000Installation ============ You can install `Kombu` either via the Python Package Index (PyPI) or from source. To install using `pip`: .. code-block:: console $ pip install kombu To install using `easy_install`: .. code-block:: console $ easy_install kombu If you have downloaded a source tarball you can install it by doing the following: .. code-block:: console $ python setup.py build # python setup.py install # as root
kombu-4.1.0/docs/reference/0000755000175000017500000000000013134154263015447 5ustar omeromer00000000000000kombu-4.1.0/docs/reference/kombu.utils.compat.rst0000644000175000017500000000045413130603207021733 0ustar omeromer00000000000000========================================================== Python Compatibility - ``kombu.utils.compat`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.compat .. automodule:: kombu.utils.compat :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.eventio.rst0000644000175000017500000000045613130603207022123 0ustar omeromer00000000000000========================================================== Async I/O Selectors - ``kombu.utils.eventio`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.eventio
.. automodule:: kombu.utils.eventio :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.base.rst0000644000175000017500000000353313130603207022257 0ustar omeromer00000000000000================================================== Transport Base Class - ``kombu.transport.base`` ================================================== .. currentmodule:: kombu.transport.base .. automodule:: kombu.transport.base .. contents:: :local: Message ------- .. autoclass:: Message .. autoattribute:: payload .. autoattribute:: channel .. autoattribute:: delivery_tag .. autoattribute:: content_type .. autoattribute:: content_encoding .. autoattribute:: delivery_info .. autoattribute:: headers .. autoattribute:: properties .. autoattribute:: body .. autoattribute:: acknowledged .. automethod:: ack .. automethod:: reject .. automethod:: requeue .. automethod:: decode Transport --------- .. autoclass:: Transport .. autoattribute:: client .. autoattribute:: default_port .. attribute:: recoverable_connection_errors Optional list of connection-related exceptions that can be recovered from, but where the connection must be closed and re-established first. If not defined then all :attr:`connection_errors` and :attr:`channel_errors` will be regarded as recoverable, but needing to close the connection first. .. attribute:: recoverable_channel_errors Optional list of channel-related exceptions that can be automatically recovered from without re-establishing the connection. .. autoattribute:: connection_errors .. autoattribute:: channel_errors .. automethod:: establish_connection .. automethod:: close_connection .. automethod:: create_channel .. automethod:: close_channel .. automethod:: drain_events
kombu-4.1.0/docs/reference/kombu.transport.rst0000644000175000017500000000103713130603207021343 0ustar omeromer00000000000000=========================================== Built-in Transports - ``kombu.transport`` =========================================== .. currentmodule:: kombu.transport .. automodule:: kombu.transport .. contents:: :local: Data ---- .. data:: DEFAULT_TRANSPORT Default transport used when no transport is specified. .. data:: TRANSPORT_ALIASES Mapping of transport aliases/class names. Functions --------- .. autofunction:: get_transport_cls .. autofunction:: resolve_transport
kombu-4.1.0/docs/reference/kombu.transport.pyro.rst0000644000175000017500000000070313130603207022332 0ustar omeromer00000000000000================================================ Pyro Transport - ``kombu.transport.pyro`` ================================================ .. currentmodule:: kombu.transport.pyro .. automodule:: kombu.transport.pyro .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.connection.rst0000644000175000017500000000156413130603207021453 0ustar omeromer00000000000000======================================= Connection - ``kombu.connection`` ======================================= .. currentmodule:: kombu.connection .. automodule:: kombu.connection .. contents:: :local: Connection ---------- .. autoclass:: Connection :members: :undoc-members: Pools ----- .. seealso:: The shortcut methods :meth:`Connection.Pool` and :meth:`Connection.ChannelPool` are the recommended way to instantiate these classes. .. autoclass:: ConnectionPool .. autoattribute:: LimitExceeded .. automethod:: acquire .. automethod:: release .. automethod:: force_close_all .. autoclass:: ChannelPool .. autoattribute:: LimitExceeded .. automethod:: acquire .. automethod:: release .. automethod:: force_close_all
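As a usage sketch for the pools above (the broker URL and the ``limit`` value are illustrative, not defaults):

.. code-block:: python

    from kombu import Connection

    connection = Connection('amqp://guest:guest@localhost//')
    pool = connection.Pool(limit=2)

    # block=True waits for a free connection instead of raising
    # kombu.exceptions.ConnectionLimitExceeded when the limit is hit.
    conn = pool.acquire(block=True)
    try:
        conn.connect()  # establish the underlying transport connection
    finally:
        conn.release()  # always return the connection to the pool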
kombu-4.1.0/docs/reference/kombu.transport.filesystem.rst0000644000175000017500000000075513130603207023534 0ustar omeromer00000000000000======================================================== File-system Transport - ``kombu.transport.filesystem`` ======================================================== .. currentmodule:: kombu.transport.filesystem .. automodule:: kombu.transport.filesystem .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.time.rst0000644000175000017500000000044013130603207021401 0ustar omeromer00000000000000========================================================== Time Utilities - ``kombu.utils.time`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.time .. automodule:: kombu.utils.time :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.async.aws.sqs.message.rst0000644000175000017500000000047713130603207023454 0ustar omeromer00000000000000========================================================== SQS Messages - ``kombu.async.aws.sqs.message`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws.sqs.message .. automodule:: kombu.async.aws.sqs.message :members: :undoc-members:
kombu-4.1.0/docs/reference/index.rst0000644000175000017500000000327113130603207017304 0ustar omeromer00000000000000=========================== API Reference =========================== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 kombu kombu.common kombu.mixins kombu.simple kombu.clocks kombu.compat kombu.pidbox kombu.exceptions kombu.log kombu.connection kombu.message kombu.compression kombu.pools kombu.abstract kombu.resource kombu.async kombu.async.hub kombu.async.semaphore kombu.async.timer kombu.async.debug kombu.async.http kombu.async.http.base kombu.async.http.curl kombu.async.aws kombu.async.aws.connection kombu.async.aws.sqs kombu.async.aws.sqs.connection kombu.async.aws.sqs.message kombu.async.aws.sqs.queue kombu.transport kombu.transport.pyamqp kombu.transport.librabbitmq kombu.transport.qpid kombu.transport.memory kombu.transport.redis kombu.transport.mongodb kombu.transport.consul kombu.transport.etcd kombu.transport.zookeeper kombu.transport.filesystem kombu.transport.sqlalchemy kombu.transport.sqlalchemy.models kombu.transport.SQS kombu.transport.SLMQ kombu.transport.pyro kombu.transport.base kombu.transport.virtual kombu.transport.virtual.exchange kombu.serialization kombu.utils.amq_manager kombu.utils.collections kombu.utils.compat kombu.utils.debug kombu.utils.div kombu.utils.encoding kombu.utils.eventio kombu.utils.functional kombu.utils.imports kombu.utils.json kombu.utils.limits kombu.utils.objects kombu.utils.scheduling kombu.utils.text kombu.utils.time kombu.utils.url kombu.utils.uuid kombu.five
kombu-4.1.0/docs/reference/kombu.transport.qpid.rst0000644000175000017500000000123013130603207022272 0ustar omeromer00000000000000=================================================== Apache Qpid Transport - ``kombu.transport.qpid`` =================================================== .. currentmodule:: kombu.transport.qpid .. automodule:: kombu.transport.qpid .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- ..
autoclass:: Connection :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.objects.rst0000644000175000017500000000046413130603207022102 0ustar omeromer00000000000000========================================================== Object/Property Utilities - ``kombu.utils.objects`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.objects .. automodule:: kombu.utils.objects :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.http.rst0000644000175000017500000000044313130603207021402 0ustar omeromer00000000000000========================================================== Async HTTP Client - ``kombu.async.http`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.http .. automodule:: kombu.async.http :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.encoding.rst0000644000175000017500000000046713130603207022242 0ustar omeromer00000000000000========================================================== String Encoding Utilities - ``kombu.utils.encoding`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.encoding .. automodule:: kombu.utils.encoding :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.rst0000644000175000017500000000041513130603207020423 0ustar omeromer00000000000000========================================================== Event Loop - ``kombu.async`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async .. automodule:: kombu.async :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.debug.rst0000644000175000017500000000045713130603207021516 0ustar omeromer00000000000000========================================================== Event Loop Debugging Utils - ``kombu.async.debug`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.debug .. automodule:: kombu.async.debug :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.five.rst0000644000175000017500000000043613130603207020242 0ustar omeromer00000000000000========================================================== Python 2 to Python 3 utilities - ``kombu.five`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.five .. automodule:: kombu.five :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.semaphore.rst0000644000175000017500000000045313130603207022407 0ustar omeromer00000000000000========================================================== Semaphores - ``kombu.async.semaphore`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.semaphore .. automodule:: kombu.async.semaphore :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.collections.rst0000644000175000017500000000047113130603207022765 0ustar omeromer00000000000000========================================================== Custom Collections - ``kombu.utils.collections`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.collections .. 
automodule:: kombu.utils.collections :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.clocks.rst0000644000175000017500000000045013130603207020563 0ustar omeromer00000000000000========================================================== Logical Clocks and Synchronization - ``kombu.clocks`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.clocks .. automodule:: kombu.clocks :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.div.rst0000644000175000017500000000043413130603207021230 0ustar omeromer00000000000000========================================================== Div Utilities - ``kombu.utils.div`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.div .. automodule:: kombu.utils.div :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.pools.rst0000644000175000017500000000043413130603207020443 0ustar omeromer00000000000000========================================================== Connection/Producer Pools - ``kombu.pools`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.pools .. automodule:: kombu.pools :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.limits.rst0000644000175000017500000000044513130603207021751 0ustar omeromer00000000000000========================================================== Rate limiting - ``kombu.utils.limits`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.limits .. automodule:: kombu.utils.limits :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.amq_manager.rst0000644000175000017500000000047313130603207022721 0ustar omeromer00000000000000======================================================== Generic RabbitMQ manager - ``kombu.utils.amq_manager`` ======================================================== .. contents:: :local: .. currentmodule:: kombu.utils.amq_manager .. automodule:: kombu.utils.amq_manager :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.serialization.rst0000644000175000017500000000223313130603207022163 0ustar omeromer00000000000000======================================== Message Serialization - ``kombu.serialization`` ======================================== .. currentmodule:: kombu.serialization .. automodule:: kombu.serialization .. contents:: :local: Overview -------- Centralized support for encoding/decoding of data structures. Contains json, pickle, msgpack, and yaml serializers. Optionally installs support for YAML if the `PyYAML`_ package is installed. Optionally installs support for `msgpack`_ if the `msgpack-python`_ package is installed. Exceptions ---------- .. autoexception:: SerializerNotInstalled Serialization ------------- .. autofunction:: encode .. autofunction:: decode .. autofunction:: raw_encode Registry -------- .. autofunction:: register .. autodata:: registry .. _`cjson`: http://pypi.python.org/pypi/python-cjson/ .. _`simplejson`: http://code.google.com/p/simplejson/ .. _`Python 2.6+`: http://docs.python.org/library/json.html .. _`PyYAML`: http://pyyaml.org/ .. _`msgpack`: http://msgpack.sourceforge.net/ .. _`msgpack-python`: http://pypi.python.org/pypi/msgpack-python/
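As an illustrative sketch of the registry (the ``'myjson'`` name and content type are hypothetical; only the :func:`register` signature above is assumed):

.. code-block:: python

    import json

    from kombu.serialization import register

    def my_dumps(obj):
        # Encoder: return the serialized payload for a message body.
        return json.dumps(obj)

    def my_loads(data):
        # Decoder: turn the raw payload back into Python data.
        return json.loads(data)

    register('myjson', my_dumps, my_loads,
             content_type='application/x-myjson',
             content_encoding='utf-8')

Once registered, the name can be used like the built-in serializers, e.g. ``Producer(channel, serializer='myjson')``.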
kombu-4.1.0/docs/reference/kombu.async.aws.sqs.rst0000644000175000017500000000046213130603207022023 0ustar omeromer00000000000000========================================================== Async Amazon SQS Client - ``kombu.async.aws.sqs`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws.sqs .. automodule:: kombu.async.aws.sqs :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.resource.rst0000644000175000017500000000043713130603207021141 0ustar omeromer00000000000000========================================================== Resource Management - ``kombu.resource`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.resource .. automodule:: kombu.resource :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.mixins.rst0000644000175000017500000000042313130603207020614 0ustar omeromer00000000000000========================================================== Mixin Classes - ``kombu.mixins`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.mixins .. automodule:: kombu.mixins :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.memory.rst0000644000175000017500000000072213130603207022652 0ustar omeromer00000000000000================================================== In-memory Transport - ``kombu.transport.memory`` ================================================== .. currentmodule:: kombu.transport.memory .. automodule:: kombu.transport.memory .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.pidbox.rst0000644000175000017500000000443713130603207020601 0ustar omeromer00000000000000========================================= Pidbox - ``kombu.pidbox`` ========================================= .. currentmodule:: kombu.pidbox .. automodule:: kombu.pidbox .. contents:: :local: Introduction ------------ Creating the application's Mailbox ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code-block:: python

    >>> mailbox = pidbox.Mailbox('celerybeat', type='direct')

    >>> @mailbox.handler
    ... def reload_schedule(state, **kwargs):
    ...     state['beat'].reload_schedule()

    >>> @mailbox.handler
    ... def connection_info(state, **kwargs):
    ...     return {'connection': state['connection'].info()}

Example Node ~~~~~~~~~~~~ .. code-block:: python

    >>> connection = kombu.Connection()
    >>> state = {'beat': beat, 'connection': connection}
    >>> consumer = mailbox(connection).Node(hostname).listen()
    >>> try:
    ...     while True:
    ...         connection.drain_events(timeout=1)
    ... finally:
    ...     consumer.cancel()

Example Client ~~~~~~~~~~~~~~ .. code-block:: python

    >>> mailbox.cast('reload_schedule')   # cast is async.
    >>> info = celerybeat.call('connection_info', timeout=1)

Mailbox ------- .. autoclass:: Mailbox .. autoattribute:: namespace .. autoattribute:: connection .. autoattribute:: type .. autoattribute:: exchange .. autoattribute:: reply_exchange .. automethod:: Node .. automethod:: call .. automethod:: cast .. automethod:: abcast .. automethod:: multi_call .. automethod:: get_reply_queue .. automethod:: get_queue Node ---- .. autoclass:: Node .. autoattribute:: hostname .. autoattribute:: mailbox .. autoattribute:: handlers .. autoattribute:: state .. autoattribute:: channel .. automethod:: Consumer .. automethod:: handler .. automethod:: listen
.. automethod:: dispatch .. automethod:: dispatch_from_message .. automethod:: handle_call .. automethod:: handle_cast .. automethod:: handle .. automethod:: handle_message .. automethod:: reply
kombu-4.1.0/docs/reference/kombu.utils.scheduling.rst0000644000175000017500000000044513130603207022575 0ustar omeromer00000000000000================================================= Consumer Scheduling - ``kombu.utils.scheduling`` ================================================= .. contents:: :local: .. currentmodule:: kombu.utils.scheduling .. automodule:: kombu.utils.scheduling :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.pyamqp.rst0000644000175000017500000000131313130603207022646 0ustar omeromer00000000000000========================================================= Pure-python AMQP Transport - ``kombu.transport.pyamqp`` ========================================================= .. currentmodule:: kombu.transport.pyamqp .. automodule:: kombu.transport.pyamqp .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- .. autoclass:: Connection :members: :undoc-members: :inherited-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.virtual.exchange.rst0000644000175000017500000000140413130603207024607 0ustar omeromer00000000000000============================================================================= Virtual AMQ Exchange Implementation - ``kombu.transport.virtual.exchange`` ============================================================================= .. currentmodule:: kombu.transport.virtual.exchange .. automodule:: kombu.transport.virtual.exchange .. contents:: :local: Direct ------ .. autoclass:: DirectExchange :members: :undoc-members: Topic ----- .. autoclass:: TopicExchange :members: :undoc-members: Fanout ------ .. autoclass:: FanoutExchange :members: :undoc-members: Interface --------- .. autoclass:: ExchangeType :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.compression.rst0000644000175000017500000000100113130603207021641 0ustar omeromer00000000000000============================================= Message Compression - ``kombu.compression`` ============================================= .. currentmodule:: kombu.compression .. automodule:: kombu.compression .. contents:: :local: Encoding/decoding ----------------- .. autofunction:: compress .. autofunction:: decompress Registry -------- .. autofunction:: encoders .. autofunction:: get_encoder .. autofunction:: get_decoder .. autofunction:: register
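As a usage sketch (``bz2`` stands in for any codec pair here; kombu may already register a bzip2 codec where the stdlib module is available):

.. code-block:: python

    import bz2

    from kombu.compression import compress, decompress, register

    # Register encoder/decoder callables under a content type.
    register(bz2.compress, bz2.decompress,
             'application/x-bz2', aliases=['bz2'])

    body, content_type = compress(b'my very large payload', 'bz2')
    assert decompress(body, content_type) == b'my very large payload'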
kombu-4.1.0/docs/reference/kombu.transport.consul.rst0000644000175000017500000000071313130603207022645 0ustar omeromer00000000000000================================================ Consul Transport - ``kombu.transport.consul`` ================================================ .. currentmodule:: kombu.transport.consul .. automodule:: kombu.transport.consul .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.etcd.rst0000644000175000017500000000070313130603207022260 0ustar omeromer00000000000000================================================ Etcd Transport - ``kombu.transport.etcd`` ================================================ .. currentmodule:: kombu.transport.etcd .. automodule:: kombu.transport.etcd .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.utils.text.rst0000644000175000017500000000044113130603207021430 0ustar omeromer00000000000000========================================================== Text utilities - ``kombu.utils.text`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.text .. automodule:: kombu.utils.text :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.redis.rst0000644000175000017500000000071113130603207022446 0ustar omeromer00000000000000================================================= Redis Transport - ``kombu.transport.redis`` ================================================= .. currentmodule:: kombu.transport.redis .. automodule:: kombu.transport.redis .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.async.aws.connection.rst0000644000175000017500000000050513130603207023352 0ustar omeromer00000000000000========================================================== Amazon AWS Connection - ``kombu.async.aws.connection`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws.connection .. automodule:: kombu.async.aws.connection :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.librabbitmq.rst0000644000175000017500000000134513130603207023634 0ustar omeromer00000000000000=============================================================== librabbitmq AMQP transport - ``kombu.transport.librabbitmq`` =============================================================== .. currentmodule:: kombu.transport.librabbitmq .. automodule:: kombu.transport.librabbitmq .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Connection ---------- .. autoclass:: Connection :members: :undoc-members: :inherited-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: Message ------- .. autoclass:: Message :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.transport.zookeeper.rst0000644000175000017500000000074413130603207023351 0ustar omeromer00000000000000====================================================== Zookeeper Transport - ``kombu.transport.zookeeper`` ====================================================== .. currentmodule:: kombu.transport.zookeeper .. automodule:: kombu.transport.zookeeper .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.async.aws.sqs.connection.rst0000644000175000017500000000051213130603207024155 0ustar omeromer00000000000000========================================================== SQS Connection - ``kombu.async.aws.sqs.connection`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws.sqs.connection .. automodule:: kombu.async.aws.sqs.connection :members: :undoc-members:
kombu-4.1.0/docs/reference/kombu.async.http.base.rst0000644000175000017500000000047413130603207022317 0ustar omeromer00000000000000========================================================== Async HTTP Client Interface - ``kombu.async.http.base`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.http.base ..
contents:: :local: .. currentmodule:: kombu.async.http.base .. automodule:: kombu.async.http.base :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.transport.SQS.rst0000644000175000017500000000070613130603207022012 0ustar omeromer00000000000000================================================ Amazon SQS Transport - ``kombu.transport.SQS`` ================================================ .. currentmodule:: kombu.transport.SQS .. automodule:: kombu.transport.SQS .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.hub.rst0000644000175000017500000000045013130603207021177 0ustar omeromer00000000000000========================================================== Event Loop Implementation - ``kombu.async.hub`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.hub .. automodule:: kombu.async.hub :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.aws.rst0000644000175000017500000000044613130603207021220 0ustar omeromer00000000000000========================================================== Async Amazon AWS Client - ``kombu.async.aws`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws .. automodule:: kombu.async.aws :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.transport.virtual.rst0000644000175000017500000000433713130603207023036 0ustar omeromer00000000000000============================================================ Virtual Transport Base Class - ``kombu.transport.virtual`` ============================================================ .. currentmodule:: kombu.transport.virtual .. automodule:: kombu.transport.virtual .. contents:: :local: Transports ---------- .. autoclass:: Transport .. autoattribute:: Channel .. autoattribute:: Cycle .. autoattribute:: polling_interval .. autoattribute:: default_port .. autoattribute:: state .. autoattribute:: cycle .. automethod:: establish_connection .. automethod:: close_connection .. automethod:: create_channel .. automethod:: close_channel .. automethod:: drain_events Channel ------- .. autoclass:: AbstractChannel :members: .. autoclass:: Channel .. autoattribute:: Message .. autoattribute:: state .. autoattribute:: qos .. autoattribute:: do_restore .. autoattribute:: exchange_types .. automethod:: exchange_declare .. automethod:: exchange_delete .. automethod:: queue_declare .. automethod:: queue_delete .. automethod:: queue_bind .. automethod:: queue_purge .. automethod:: basic_publish .. automethod:: basic_consume .. automethod:: basic_cancel .. automethod:: basic_get .. automethod:: basic_ack .. automethod:: basic_recover .. automethod:: basic_reject .. automethod:: basic_qos .. automethod:: get_table .. automethod:: typeof .. automethod:: drain_events .. automethod:: prepare_message .. automethod:: message_to_python .. automethod:: flow .. automethod:: close Message ------- .. autoclass:: Message :members: :undoc-members: :inherited-members: Quality Of Service ------------------ .. autoclass:: QoS :members: :undoc-members: :inherited-members: In-memory State --------------- .. 
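A small sketch of what this state means in practice, assuming the in-memory ``memory://`` transport and that, as the attributes above indicate, channels of a virtual connection share one state object holding the exchange and binding tables:

.. code-block:: python

    from kombu import Connection

    # Two channels on one virtual connection share the broker state,
    # so a declaration made on one is visible from the other.
    with Connection('memory://') as conn:
        chan1 = conn.channel()
        chan2 = conn.channel()
        chan1.exchange_declare(exchange='news', type='direct')
        assert chan2.state is chan1.state
        assert 'news' in chan2.state.exchanges

.. 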
autoclass:: BrokerState :members: :undoc-members: :inherited-members: kombu-4.1.0/docs/reference/kombu.exceptions.rst0000644000175000017500000000071513130603207021472 0ustar omeromer00000000000000===================================== Exceptions - ``kombu.exceptions`` ===================================== .. currentmodule:: kombu.exceptions .. automodule:: kombu.exceptions .. contents:: :local: .. autoexception:: NotBoundError .. autoexception:: MessageStateError .. autoexception:: TimeoutError .. autoexception:: LimitExceeded .. autoexception:: ConnectionLimitExceeded .. autoexception:: ChannelLimitExceeded kombu-4.1.0/docs/reference/kombu.compat.rst0000644000175000017500000000135113130603207020571 0ustar omeromer00000000000000========================================== Carrot Compatibility - ``kombu.compat`` ========================================== .. currentmodule:: kombu.compat .. automodule:: kombu.compat .. contents:: :local: Publisher --------- Replace with :class:`kombu.Producer`. .. autoclass:: Publisher :members: :undoc-members: :inherited-members: Consumer -------- Replace with :class:`kombu.Consumer`. .. autoclass:: Consumer :members: :undoc-members: :inherited-members: ConsumerSet ----------- Replace with :class:`kombu.Consumer`. .. autoclass:: ConsumerSet :members: :undoc-members: :inherited-members: kombu-4.1.0/docs/reference/kombu.utils.functional.rst0000644000175000017500000000047613130603207022616 0ustar omeromer00000000000000========================================================== Functional-style Utilities - ``kombu.utils.functional`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.functional .. automodule:: kombu.utils.functional :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.debug.rst0000644000175000017500000000045013130603207021532 0ustar omeromer00000000000000========================================================== Debugging Utilities - ``kombu.utils.debug`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.debug .. automodule:: kombu.utils.debug :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.http.curl.rst0000644000175000017500000000047113130603207022347 0ustar omeromer00000000000000========================================================== Async pyCurl HTTP Client - ``kombu.async.http.curl`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.http.curl .. automodule:: kombu.async.http.curl :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.simple.rst0000644000175000017500000000377513130603207020613 0ustar omeromer00000000000000=============================================== Simple Messaging API - ``kombu.simple`` =============================================== .. currentmodule:: kombu.simple .. automodule:: kombu.simple .. contents:: :local: Persistent ---------- .. autoclass:: SimpleQueue .. attribute:: channel Current channel .. attribute:: producer :class:`~kombu.Producer` used to publish messages. .. attribute:: consumer :class:`~kombu.Consumer` used to receive messages. .. attribute:: no_ack flag to enable/disable acknowledgments. .. attribute:: queue :class:`~kombu.Queue` to consume from (if consuming). .. attribute:: queue_opts Additional options for the queue declaration. .. attribute:: exchange_opts Additional options for the exchange declaration. .. automethod:: get .. automethod:: get_nowait .. automethod:: put .. 
automethod:: clear .. automethod:: __len__ .. automethod:: qsize .. automethod:: close Buffer ------ .. autoclass:: SimpleBuffer .. attribute:: channel Current channel .. attribute:: producer :class:`~kombu.Producer` used to publish messages. .. attribute:: consumer :class:`~kombu.Consumer` used to receive messages. .. attribute:: no_ack flag to enable/disable acknowledgments. .. attribute:: queue :class:`~kombu.Queue` to consume from (if consuming). .. attribute:: queue_opts Additional options for the queue declaration. .. attribute:: exchange_opts Additional options for the exchange declaration. .. automethod:: get .. automethod:: get_nowait .. automethod:: put .. automethod:: clear .. automethod:: __len__ .. automethod:: qsize .. automethod:: close kombu-4.1.0/docs/reference/kombu.common.rst0000644000175000017500000000042613130603207020600 0ustar omeromer00000000000000========================================================== Common Utilities - ``kombu.common`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.common .. automodule:: kombu.common :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.transport.sqlalchemy.models.rst0000644000175000017500000000131513130603207024765 0ustar omeromer00000000000000===================================================================== SQLAlchemy Transport Model - ``kombu.transport.sqlalchemy.models`` ===================================================================== .. currentmodule:: kombu.transport.sqlalchemy.models .. automodule:: kombu.transport.sqlalchemy.models .. contents:: :local: Models ------ .. autoclass:: Queue .. autoattribute:: Queue.id .. autoattribute:: Queue.name .. autoclass:: Message .. autoattribute:: Message.id .. autoattribute:: Message.visible .. autoattribute:: Message.sent_at .. autoattribute:: Message.payload .. autoattribute:: Message.version kombu-4.1.0/docs/reference/kombu.async.aws.sqs.queue.rst0000644000175000017500000000046713130603207023153 0ustar omeromer00000000000000========================================================== SQS Queues - ``kombu.async.aws.sqs.queue`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.aws.sqs.queue .. automodule:: kombu.async.aws.sqs.queue :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.imports.rst0000644000175000017500000000046513130603207022147 0ustar omeromer00000000000000========================================================== Module Importing Utilities - ``kombu.utils.imports`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.imports .. automodule:: kombu.utils.imports :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.rst0000644000175000017500000001361713130603207017317 0ustar omeromer00000000000000=================================== Kombu - ``kombu`` =================================== .. currentmodule:: kombu .. contents:: :local: .. automodule:: kombu .. autofunction:: enable_insecure_serializers .. autofunction:: disable_insecure_serializers Connection ---------- .. autoclass:: Connection .. admonition:: Attributes .. autoattribute:: hostname .. autoattribute:: port .. autoattribute:: userid .. autoattribute:: password .. autoattribute:: virtual_host .. autoattribute:: ssl .. autoattribute:: login_method .. autoattribute:: failover_strategy .. autoattribute:: connect_timeout .. autoattribute:: heartbeat .. autoattribute:: default_channel .. 
autoattribute:: connected .. autoattribute:: recoverable_connection_errors .. autoattribute:: recoverable_channel_errors .. autoattribute:: connection_errors .. autoattribute:: channel_errors .. autoattribute:: transport .. autoattribute:: connection .. autoattribute:: uri_prefix .. autoattribute:: declared_entities .. autoattribute:: cycle .. autoattribute:: host .. autoattribute:: manager .. autoattribute:: supports_heartbeats .. autoattribute:: is_evented .. admonition:: Methods .. automethod:: as_uri .. automethod:: connect .. automethod:: channel .. automethod:: drain_events .. automethod:: release .. automethod:: autoretry .. automethod:: ensure_connection .. automethod:: ensure .. automethod:: revive .. automethod:: create_transport .. automethod:: get_transport_cls .. automethod:: clone .. automethod:: info .. automethod:: switch .. automethod:: maybe_switch_next .. automethod:: heartbeat_check .. automethod:: maybe_close_channel .. automethod:: register_with_event_loop .. automethod:: close .. automethod:: _close .. automethod:: completes_cycle .. automethod:: get_manager .. automethod:: Producer .. automethod:: Consumer .. automethod:: Pool .. automethod:: ChannelPool .. automethod:: SimpleQueue .. automethod:: SimpleBuffer Exchange -------- Example creating an exchange declaration:: >>> news_exchange = Exchange('news', type='topic') For now `news_exchange` is just a declaration, you can't perform actions on it. It just describes the name and options for the exchange. The exchange can be bound or unbound. Bound means the exchange is associated with a channel and operations can be performed on it. To bind the exchange you call the exchange with the channel as argument:: >>> bound_exchange = news_exchange(channel) Now you can perform operations like :meth:`declare` or :meth:`delete`:: >>> # Declare exchange manually >>> bound_exchange.declare() >>> # Publish raw string message using low-level exchange API >>> bound_exchange.publish( ... 'Cure for cancer found!', ... routing_key='news.science', ... ) >>> # Delete exchange. >>> bound_exchange.delete() .. autoclass:: Exchange :members: :undoc-members: .. automethod:: maybe_bind Queue ----- Example creating a queue using our exchange in the :class:`Exchange` example:: >>> science_news = Queue('science_news', ... exchange=news_exchange, ... routing_key='news.science') For now `science_news` is just a declaration, you can't perform actions on it. It just describes the name and options for the queue. The queue can be bound or unbound. Bound means the queue is associated with a channel and operations can be performed on it. To bind the queue you call the queue instance with the channel as an argument:: >>> bound_science_news = science_news(channel) Now you can perform operations like :meth:`declare` or :meth:`purge`: .. code-block:: python >>> bound_science_news.declare() >>> bound_science_news.purge() >>> bound_science_news.delete() .. autoclass:: Queue :members: :undoc-members: .. automethod:: maybe_bind Message Producer ---------------- .. autoclass:: Producer .. autoattribute:: channel .. autoattribute:: exchange .. autoattribute:: routing_key .. autoattribute:: serializer .. autoattribute:: compression .. autoattribute:: auto_declare .. autoattribute:: on_return .. autoattribute:: connection .. automethod:: declare .. automethod:: maybe_declare .. automethod:: publish .. automethod:: revive Message Consumer ---------------- .. autoclass:: Consumer .. autoattribute:: channel .. autoattribute:: queues .. autoattribute:: no_ack .. 
autoattribute:: auto_declare .. autoattribute:: callbacks .. autoattribute:: on_message .. autoattribute:: on_decode_error .. autoattribute:: connection .. automethod:: declare .. automethod:: register_callback .. automethod:: add_queue .. automethod:: consume .. automethod:: cancel .. automethod:: cancel_by_queue .. automethod:: consuming_from .. automethod:: purge .. automethod:: flow .. automethod:: qos .. automethod:: recover .. automethod:: receive .. automethod:: revive kombu-4.1.0/docs/reference/kombu.transport.sqlalchemy.rst0000644000175000017500000000076413130603207023512 0ustar omeromer00000000000000=========================================================== SQLAlchemy Transport Model - kombu.transport.sqlalchemy =========================================================== .. currentmodule:: kombu.transport.sqlalchemy .. automodule:: kombu.transport.sqlalchemy .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.transport.SLMQ.rst0000644000175000017500000000067613130603207022126 0ustar omeromer00000000000000============================================= SLMQ Transport - ``kombu.transport.SLMQ`` ============================================= .. currentmodule:: kombu.transport.SLMQ .. automodule:: kombu.transport.SLMQ .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. autoclass:: Channel :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.log.rst0000644000175000017500000000040413130603207020065 0ustar omeromer00000000000000========================================================== Logging - ``kombu.log`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.log .. automodule:: kombu.log :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.message.rst0000644000175000017500000000043013130603207020727 0ustar omeromer00000000000000========================================================== Message Objects - ``kombu.message`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.message .. automodule:: kombu.message :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.json.rst0000644000175000017500000000044013130603207021414 0ustar omeromer00000000000000========================================================== JSON Utilities - ``kombu.utils.json`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.json .. automodule:: kombu.utils.json :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.url.rst0000644000175000017500000000040413130603207021245 0ustar omeromer00000000000000============================================== URL Utilities - ``kombu.utils.url`` ============================================== .. contents:: :local: .. currentmodule:: kombu.utils.url .. automodule:: kombu.utils.url :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.transport.mongodb.rst0000644000175000017500000000072113130603207022766 0ustar omeromer00000000000000================================================= MongoDB Transport - ``kombu.transport.mongodb`` ================================================= .. currentmodule:: kombu.transport.mongodb .. automodule:: kombu.transport.mongodb .. contents:: :local: Transport --------- .. autoclass:: Transport :members: :undoc-members: Channel ------- .. 
autoclass:: Channel :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.utils.uuid.rst0000644000175000017500000000044013130603207021411 0ustar omeromer00000000000000========================================================== UUID Utilities - ``kombu.utils.uuid`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.utils.uuid .. automodule:: kombu.utils.uuid :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.async.timer.rst0000644000175000017500000000043213130603207021541 0ustar omeromer00000000000000========================================================== Timer - ``kombu.async.timer`` ========================================================== .. contents:: :local: .. currentmodule:: kombu.async.timer .. automodule:: kombu.async.timer :members: :undoc-members: kombu-4.1.0/docs/reference/kombu.abstract.rst0000644000175000017500000000045513130603207021115 0ustar omeromer00000000000000======================================= Abstract Classes - ``kombu.abstract`` ======================================= .. currentmodule:: kombu.abstract .. automodule:: kombu.abstract .. contents:: :local: .. autoclass:: MaybeChannelBound :members: :undoc-members: kombu-4.1.0/docs/userguide/0000755000175000017500000000000013134154263015505 5ustar omeromer00000000000000kombu-4.1.0/docs/userguide/examples.rst0000644000175000017500000000202713130603207020047 0ustar omeromer00000000000000.. _examples: ======================== Examples ======================== .. _hello-world-example: Hello World Example =================== Below example uses :ref:`guide-simple` to send helloworld message through message broker (rabbitmq) and print received message :file:`hello_publisher.py`: .. literalinclude:: ../../examples/hello_publisher.py :language: python :file:`hello_consumer.py`: .. literalinclude:: ../../examples/hello_consumer.py :language: python .. _task-queue-example: Task Queue Example ================== Very simple task queue using pickle, with primitive support for priorities using different queues. :file:`queues.py`: .. literalinclude:: ../../examples/simple_task_queue/queues.py :language: python :file:`worker.py`: .. literalinclude:: ../../examples/simple_task_queue/worker.py :language: python :file:`tasks.py`: .. literalinclude:: ../../examples/simple_task_queue/tasks.py :language: python :file:`client.py`: .. literalinclude:: ../../examples/simple_task_queue/client.py kombu-4.1.0/docs/userguide/pools.rst0000644000175000017500000001215613130603207017371 0ustar omeromer00000000000000.. _guide-pools: =============================== Connection and Producer Pools =============================== .. _default-pools: Default Pools ============= Kombu ships with two global pools: one connection pool, and one producer pool. These are convenient and the fact that they are global may not be an issue as connections should often be limited at the process level, rather than per thread/application and so on, but if you need custom pools per thread see :ref:`custom-pool-groups`. .. _default-connections: The connection pool group ------------------------- The connection pools are available as :attr:`kombu.pools.connections`. This is a pool group, which means you give it a connection instance, and you get a pool instance back. We have one pool per connection instance to support multiple connections in the same app. All connection instances with the same connection parameters will get the same pool: .. 
code-block:: pycon >>> from kombu import Connection >>> from kombu.pools import connections >>> connections[Connection('redis://localhost:6379')] >>> connections[Connection('redis://localhost:6379')] Let's acquire and release a connection: .. code-block:: python from kombu import Connection from kombu.pools import connections connection = Connection('redis://localhost:6379') with connections[connection].acquire(block=True) as conn: print('Got connection: {0!r}'.format(connection.as_uri())) .. note:: The ``block=True`` here means that the acquire call will block until a connection is available in the pool. Note that this will block forever in case there is a deadlock in your code where a connection is not released. There is a ``timeout`` argument you can use to safeguard against this (see :meth:`kombu.connection.Resource.acquire`). If blocking is disabled and there aren't any connections left in the pool an :class:`kombu.exceptions.ConnectionLimitExceeded` exception will be raised. That's about it. If you need to connect to multiple brokers at once you can do that too: .. code-block:: python from kombu import Connection from kombu.pools import connections c1 = Connection('amqp://') c2 = Connection('redis://') with connections[c1].acquire(block=True) as conn1: with connections[c2].acquire(block=True) as conn2: # .... .. _default-producers: The producer pool group ----------------------- This is a pool group just like the connections, except that it manages :class:`~kombu.Producer` instances used to publish messages. Here is an example using the producer pool to publish a message to the ``news`` exchange: .. code-block:: python from kombu import Connection, Exchange from kombu.pools import producers # The exchange we send our news articles to. news_exchange = Exchange('news') # The article we want to send article = {'title': 'No cellular coverage on the tube for 2012', 'ingress': 'yadda yadda yadda'} # The broker where our exchange is. connection = Connection('amqp://guest:guest@localhost:5672//') with producers[connection].acquire(block=True) as producer: producer.publish( article, exchange=news_exchange, routing_key='domestic', declare=[news_exchange], serializer='json', compression='zlib') .. _default-pool-limits: Setting pool limits ------------------- By default every connection instance has a limit of 200 connections. You can change this limit using :func:`kombu.pools.set_limit`. You are able to grow the pool at runtime, but you can't shrink it, so it is best to set the limit as early as possible after your application starts: .. code-block:: pycon >>> from kombu import pools >>> pools.set_limit(100) Resetting all pools ------------------- You can close all active connections and reset all pool groups by using the :func:`kombu.pools.reset` function. Note that this will not respect anything currently using these connections, so will just drag the connections away from under their feet: you should be very careful before you use this. Kombu will reset the pools if the process is forked, so that forked processes start with clean pool groups. .. _custom-pool-groups: Custom Pool Groups ================== To maintain your own pool groups you should create your own :class:`~kombu.pools.Connections` and :class:`kombu.pools.Producers` instances: .. 
code-block:: python from kombu import pools from kombu import Connection connections = pools.Connections(limit=100) producers = pools.Producers(limit=connections.limit) connection = Connection('amqp://guest:guest@localhost:5672//') with connections[connection].acquire(block=True): # ... If you want to use the global limit that can be set with :func:`~kombu.pools.set_limit` you can use a special value as the ``limit`` argument: .. code-block:: python from kombu import pools connections = pools.Connections(limit=pools.use_default_limit) kombu-4.1.0/docs/userguide/connections.rst0000644000175000017500000001531713130603207020561 0ustar omeromer00000000000000.. _guide-connections: ============================ Connections and transports ============================ .. _connection-basics: Basics ====== To send and receive messages you need a transport and a connection. There are several transports to choose from (amqp, librabbitmq, redis, qpid, in-memory, etc.), and you can even create your own. The default transport is amqp. Create a connection using the default transport: .. code-block:: pycon >>> from kombu import Connection >>> connection = Connection('amqp://guest:guest@localhost:5672//') The connection will not be established yet, as the connection is established when needed. If you want to explicitly establish the connection you have to call the :meth:`~kombu.Connection.connect` method: .. code-block:: pycon >>> connection.connect() You can also check whether the connection is connected: .. code-block:: pycon >>> connection.connected True Connections must always be closed after use: .. code-block:: pycon >>> connection.close() But best practice is to release the connection instead: this will release the resource if the connection is associated with a connection pool, or close the connection if not, and makes it easier to do the transition to connection pools later: .. code-block:: pycon >>> connection.release() .. seealso:: :ref:`guide-pools` Of course, the connection can be used as a context, and you are encouraged to do so as it makes it harder to forget releasing open resources: .. code-block:: python with Connection() as connection: # work with connection .. _connection-urls: URLs ==== Connection parameters can be provided as a URL in the format: .. code-block:: text transport://userid:password@hostname:port/virtual_host All of these are valid URLs: .. code-block:: text # Specifies using the amqp transport only, default values # are taken from the keyword arguments. amqp:// # Using Redis redis://localhost:6379/ # Using Redis over a Unix socket redis+socket:///tmp/redis.sock # Using Qpid qpid://localhost/ # Using virtual host '/foo' amqp://localhost//foo # Using virtual host 'foo' amqp://localhost/foo The query part of the URL can also be used to set options, e.g.: .. code-block:: text amqp://localhost/myvhost?ssl=1 See :ref:`connection-options` for a list of supported options. A connection without options will use the default connection settings, which are the host ``localhost``, the default port, user name `guest`, password `guest` and virtual host "/". A connection without arguments is the same as: .. code-block:: pycon >>> Connection('amqp://guest:guest@localhost:5672//') The default port is transport specific; for AMQP this is 5672. Other fields may also have different meanings depending on the transport used. For example, the Redis transport uses the `virtual_host` argument as the redis database number. .. 
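To make the mapping between URL fields and keyword arguments concrete, here is a small sketch of two connection declarations that should be equivalent, using the Redis database number described above (the keyword arguments themselves are listed in the next section):

.. code-block:: python

    from kombu import Connection

    by_url = Connection('redis://localhost:6379/1')
    by_args = Connection(
        hostname='localhost',
        port=6379,
        transport='redis',
        virtual_host='1',  # for the Redis transport: database number 1
    )

.. 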
_connection-options: Keyword arguments ================= The :class:`~kombu.Connection` class supports additional keyword arguments, these are: :hostname: Default host name if not provided in the URL. :userid: Default user name if not provided in the URL. :password: Default password if not provided in the URL. :virtual_host: Default virtual host if not provided in the URL. :port: Default port if not provided in the URL. :transport: Default transport if not provided in the URL. Can be a string specifying the path to the class. (e.g. ``kombu.transport.pyamqp:Transport``), or one of the aliases: ``pyamqp``, ``librabbitmq``, ``redis``, ``qpid``, ``memory``, and so on. :ssl: Use SSL to connect to the server. Default is ``False``. Only supported by the amqp and qpid transports. :insist: Insist on connecting to a server. *No longer supported, relic from AMQP 0.8* :connect_timeout: Timeout in seconds for connecting to the server. May not be supported by the specified transport. :transport_options: A dict of additional connection arguments to pass to alternate kombu channel implementations. Consult the transport documentation for available options. AMQP Transports =============== There are 4 transports available for AMQP use. 1. ``pyamqp`` uses the pure Python library ``amqp``, automatically installed with Kombu. 2. ``librabbitmq`` uses the high performance transport written in C. This requires the ``librabbitmq`` Python package to be installed, which automatically compiles the C library. 3. ``amqp`` tries to use ``librabbitmq`` but falls back to ``pyamqp``. 4. ``qpid`` uses the pure Python library ``qpid.messaging``, automatically installed with Kombu. The Qpid library uses AMQP, but uses custom extensions specifically supported by the Apache Qpid Broker. For the highest performance, you should install the ``librabbitmq`` package. To ensure librabbitmq is used, you can explicitly specify it in the transport URL, or use ``amqp`` to have the fallback. Transport Comparison ==================== +---------------+----------+------------+------------+---------------+--------------+ | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | **Priority** | +---------------+----------+------------+------------+---------------+--------------+ | *amqp* | Native | Yes | Yes | Yes | Yes [#f3]_ | +---------------+----------+------------+------------+---------------+--------------+ | *qpid* | Native | Yes | Yes | Yes | No | +---------------+----------+------------+------------+---------------+--------------+ | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | Yes | +---------------+----------+------------+------------+---------------+--------------+ | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | No | +---------------+----------+------------+------------+---------------+--------------+ | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | Yes | +---------------+----------+------------+------------+---------------+--------------+ | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+ | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+ .. [#f1] Declarations only kept in memory, so exchanges/queues must be declared by all clients that needs them. .. [#f2] Fanout supported via storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. 
[#f3] AMQP Message priority support depends on broker implementation. kombu-4.1.0/docs/userguide/simple.rst0000644000175000017500000000751613130603207017532 0ustar omeromer00000000000000.. _guide-simple: ================== Simple Interface ================== .. contents:: :local: :mod:`kombu.simple` is a simple interface to AMQP queueing. It is only slightly different from the :class:`~Queue.Queue` class in the Python Standard Library, which makes it excellent for users with basic messaging needs. Instead of defining exchanges and queues, the simple classes only require two arguments: a connection or channel, and a name. The name is used as the queue, exchange and routing key. If the need arises, you can specify a :class:`~kombu.Queue` as the name argument instead. In addition, the :class:`~kombu.Connection` comes with shortcuts to create simple queues using the current connection: .. code-block:: pycon >>> queue = connection.SimpleQueue('myqueue') >>> # ... do something with queue >>> queue.close() This is equivalent to: .. code-block:: pycon >>> from kombu.simple import SimpleQueue >>> channel = connection.channel() >>> queue = SimpleQueue(channel, 'myqueue') >>> # ... do something with queue >>> queue.close() >>> channel.close() .. _simple-send-receive: Sending and receiving messages ============================== The simple interface defines two classes: :class:`~kombu.simple.SimpleQueue` and :class:`~kombu.simple.SimpleBuffer`. The former is used for persistent messages, and the latter is used for transient, buffer-like queues. They both have the same interface, so you can use them interchangeably. Here is an example using the :class:`~kombu.simple.SimpleQueue` class to produce and consume logging messages: .. code-block:: python import socket import datetime from time import time from kombu import Connection class Logger(object): def __init__(self, connection, queue_name='log_queue', serializer='json', compression=None): self.queue = connection.SimpleQueue(queue_name) self.serializer = serializer self.compression = compression def log(self, message, level='INFO', context={}): self.queue.put({'message': message, 'level': level, 'context': context, 'hostname': socket.gethostname(), 'timestamp': time()}, serializer=self.serializer, compression=self.compression) def process(self, callback, n=1, timeout=1): for _ in range(n): log_message = self.queue.get(block=True, timeout=timeout) entry = log_message.payload # deserialized data. callback(entry) log_message.ack() # remove message from queue def close(self): self.queue.close() if __name__ == '__main__': from contextlib import closing with Connection('amqp://guest:guest@localhost:5672//') as conn: with closing(Logger(conn)) as logger: # Send message logger.log('Error happened while encoding video', level='ERROR', context={'filename': 'cutekitten.mpg'}) # Consume and process message # This is the callback called when a log message is # received. def dump_entry(entry): date = datetime.datetime.fromtimestamp(entry['timestamp']) print('[%s %s %s] %s %r' % (date, entry['hostname'], entry['level'], entry['message'], entry['context'])) # Process a single message using the callback above. logger.process(dump_entry, n=1) kombu-4.1.0/docs/userguide/introduction.rst0000644000175000017500000000625713130603207020763 0ustar omeromer00000000000000.. _guide-intro: ============== Introduction ============== .. _intro-messaging: What is messaging? ================== In times long ago people didn't have email. 
They had the postal service, which with great courage would deliver mail from hand to hand all over the globe. Soldiers deployed at wars far away could only communicate with their families through the postal service, and posting a letter would mean that the recipient wouldn't actually receive the letter until weeks or months, sometimes years later. It's hard to imagine this today when people are expected to be available for phone calls every minute of the day. So humans need to communicate with each other; this shouldn't be news to anyone. But why would applications? One example is banks. When you transfer money from one bank to another, your bank sends a message to a central clearinghouse. The clearinghouse then records and coordinates the transaction. Banks need to send and receive millions and millions of messages every day, and losing a single message would mean either losing your money (bad) or the bank's money (very bad). Another example is the stock exchanges, which also have a need for very high message throughputs and have strict reliability requirements. Email is a great way for people to communicate. It is much faster than using the postal service, but using email as a means for programs to communicate would still be like the soldier above, waiting for signs of life from his girlfriend back home. .. _messaging-scenarios: Messaging Scenarios =================== * Request/Reply The request/reply pattern works like the postal service example. A message is addressed to a single recipient, with a return address printed on the back. The recipient may or may not reply to the message by sending it back to the original sender. Request-Reply is achieved using *direct* exchanges. * Broadcast In a broadcast scenario a message is sent to all parties. This could be none, one or many recipients. Broadcast is achieved using *fanout* exchanges. * Publish/Subscribe In a publish/subscribe scenario producers publish messages to topics, and consumers subscribe to the topics they are interested in. If no consumers subscribe to the topic, then the message will not be delivered to anyone. If several consumers subscribe to the topic, then the message will be delivered to all of them. Pub-sub is achieved using *topic* exchanges. .. _messaging-reliability: Reliability =========== For some applications reliability is very important. Losing a message is a critical situation that must never happen. For other applications losing a message is fine: the application may be able to recover in other ways, or the message is resent anyway as a periodic update. AMQP defines two built-in delivery modes: * persistent Messages are written to disk and survive a broker restart. * transient Messages may or may not be written to disk, as the broker sees fit to optimize memory contents. The messages won't survive a broker restart. Transient messaging is by far the fastest way to send and receive messages, so having persistent messages comes with a price, but for some applications this is a necessary cost. kombu-4.1.0/docs/userguide/index.rst0000644000175000017500000000033313130603207017336 0ustar omeromer00000000000000============ User Guide ============ :Release: |version| :Date: |today| .. toctree:: :maxdepth: 2 introduction connections producers consumers examples simple pools serialization kombu-4.1.0/docs/userguide/consumers.rst0000644000175000017500000001075313130603207020254 0ustar omeromer00000000000000.. _guide-consumers: =========== Consumers =========== .. 
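If you only want the shape of the API before reading on, here is a minimal receive loop; the broker URL and the ``tasks`` queue name are illustrative assumptions, and each piece is explained in the sections below:

.. code-block:: python

    from kombu import Connection, Consumer, Queue

    def handle_message(body, message):
        print('received: {0!r}'.format(body))
        message.ack()

    with Connection('amqp://guest:guest@localhost:5672//') as conn:
        with Consumer(conn, [Queue('tasks')],
                      callbacks=[handle_message], accept=['json']):
            # Wait at most one second for a message to arrive
            # (raises socket.timeout if nothing does).
            conn.drain_events(timeout=1)

.. 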
_consumer-basics: Basics ====== The :class:`Consumer` takes a connection (or channel) and a list of queues to consume from. Several consumers can be mixed to consume from different channels, as they all bind to the same connection, and ``drain_events`` will drain events from all channels on that connection. .. note:: Kombu since 3.0 will only accept json/binary or text messages by default; to allow deserialization of other formats you have to specify them in the ``accept`` argument (in addition to setting the right content type for your messages): .. code-block:: python Consumer(conn, accept=['json', 'pickle', 'msgpack', 'yaml']) Draining events from a single consumer: .. code-block:: python with Consumer(connection, queues, accept=['json']): connection.drain_events(timeout=1) Draining events from several consumers: .. code-block:: python from kombu.utils.compat import nested with connection.channel() as channel1, connection.channel() as channel2: with nested(Consumer(channel1, queues1, accept=['json']), Consumer(channel2, queues2, accept=['json'])): connection.drain_events(timeout=1) Or using :class:`~kombu.mixins.ConsumerMixin`: .. code-block:: python from kombu.mixins import ConsumerMixin class C(ConsumerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [ Consumer(queues, callbacks=[self.on_message], accept=['json']), ] def on_message(self, body, message): print('RECEIVED MESSAGE: {0!r}'.format(body)) message.ack() C(connection).run() and with multiple channels again: .. code-block:: python from kombu import Consumer from kombu.mixins import ConsumerMixin class C(ConsumerMixin): channel2 = None def __init__(self, connection): self.connection = connection def get_consumers(self, _, default_channel): self.channel2 = default_channel.connection.channel() return [Consumer(default_channel, queues1, callbacks=[self.on_message], accept=['json']), Consumer(self.channel2, queues2, callbacks=[self.on_special_message], accept=['json'])] def on_consumer_end(self, connection, default_channel): if self.channel2: self.channel2.close() C(connection).run() There's also a :class:`~kombu.mixins.ConsumerProducerMixin` for consumers that need to also publish messages on a separate connection (e.g. sending rpc replies, streaming results): .. code-block:: python from kombu import Producer, Queue from kombu.mixins import ConsumerProducerMixin rpc_queue = Queue('rpc_queue') class Worker(ConsumerProducerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer( queues=[rpc_queue], on_message=self.on_request, accept={'application/json'}, prefetch_count=1, )] def on_request(self, message): n = message.payload['n'] print(' [.] fib({0})'.format(n)) result = fib(n) self.producer.publish( {'result': result}, exchange='', routing_key=message.properties['reply_to'], correlation_id=message.properties['correlation_id'], serializer='json', retry=True, ) message.ack() .. seealso:: :file:`examples/rpc-tut6/` in the Github repository. Advanced Topics =============== RabbitMQ -------- Consumer Priorities ~~~~~~~~~~~~~~~~~~~ RabbitMQ defines a consumer priority extension to the AMQP protocol, that can be enabled by setting the ``x-priority`` argument to ``basic.consume``. In kombu you can specify this argument on the :class:`~kombu.Queue`, like this: .. 
code-block:: python queue = Queue('name', Exchange('exchange_name', type='direct'), consumer_arguments={'x-priority': 10}) Read more about consumer priorities here: https://www.rabbitmq.com/consumer-priority.html Reference ========= .. autoclass:: kombu.Consumer :noindex: :members: kombu-4.1.0/docs/userguide/serialization.rst0000644000175000017500000001425713130603207021116 0ustar omeromer00000000000000.. _guide-serialization: =============== Serialization =============== .. _serializers: Serializers =========== By default every message is encoded using `JSON`_, so sending Python data structures like dictionaries and lists works. `YAML`_, `msgpack`_ and Python's built-in `pickle` module are also supported, and if needed you can register any custom serialization scheme you want to use. By default Kombu will only load JSON messages, so if you want to use other serialization formats you must explicitly enable them in your consumer by using the ``accept`` argument: .. code-block:: python Consumer(conn, [queue], accept=['json', 'pickle', 'msgpack']) The accept argument can also include MIME-types. .. _`JSON`: http://www.json.org/ .. _`YAML`: http://yaml.org/ .. _`msgpack`: http://msgpack.sourceforge.net/ Each option has its advantages and disadvantages. `json` -- JSON is supported in many programming languages, is now a standard part of Python (since 2.6), and is fairly fast to decode using the modern Python libraries such as `cjson` or `simplejson`. The primary disadvantage of `JSON` is that it limits you to the following data types: strings, Unicode, floats, booleans, dictionaries, and lists. Decimals and dates are notably missing. Also, binary data will be transferred using Base64 encoding, which will cause the transferred data to be around 34% larger than an encoding which supports native binary types. However, if your data fits inside the above constraints and you need cross-language support, the default setting of `JSON` is probably your best choice. `pickle` -- If you have no desire to support any language other than Python, then using the `pickle` encoding will gain you the support of all built-in Python data types (except class instances), smaller messages when sending binary files, and a slight speedup over `JSON` processing. .. admonition:: Pickle and Security The pickle format is very convenient as it can serialize and deserialize almost any object, but this is also a concern for security. Carefully crafted pickle payloads can do almost anything a regular Python program can do, so if you let your consumer automatically decode pickled objects you must make sure to limit access to the broker so that untrusted parties do not have the ability to send messages! By default Kombu uses pickle protocol 2, but this can be changed using the :envvar:`PICKLE_PROTOCOL` environment variable or by changing the global :data:`kombu.serialization.pickle_protocol` flag. `yaml` -- YAML has many of the same characteristics as `json`, except that it natively supports more data types (including dates, recursive references, etc.) However, the Python libraries for YAML are a good bit slower than the libraries for JSON. If you need a more expressive set of data types and need to maintain cross-language compatibility, then `YAML` may be a better fit than the above. To instruct `Kombu` to use an alternate serialization method, use one of the following options. 1. Set the serialization option on a per-producer basis: .. code-block:: pycon >>> producer = Producer(channel, ... exchange=exchange, ... 
serializer='yaml') 2. Set the serialization option per message: .. code-block:: pycon >>> producer.publish(message, routing_key=rkey, ... serializer='pickle') Note that a `Consumer` does not need the serialization method specified: it can auto-detect the serialization method, as the content type is sent as a message header. .. _sending-raw-data: Sending raw data without Serialization ====================================== In some cases, you don't need your message data to be serialized. If you pass in a plain string or Unicode object as your message and a custom `content_type`, then `Kombu` will not waste cycles serializing/deserializing the data. You can optionally specify a `content_encoding` for the raw data: .. code-block:: pycon >>> import os >>> with open(os.path.expanduser('~/my_picture.jpg'), 'rb') as fh: ... producer.publish(fh.read(), content_type='image/jpeg', content_encoding='binary', routing_key=rkey) The `Message` object returned by the `Consumer` class will have `content_type` and `content_encoding` attributes. .. _serialization-entrypoints: Creating extensions using Setuptools entry-points ================================================= A package can also register new serializers using Setuptools entry-points. The entry-point must provide the name of the serializer along with the path to a tuple providing the rest of the args: ``encoder_function, decoder_function, content_type, content_encoding``. An example entrypoint could be: .. code-block:: python from setuptools import setup setup( entry_points={ 'kombu.serializers': [ 'my_serializer = my_module.serializer:register_args' ] } ) Then the module ``my_module.serializer`` would look like: .. code-block:: python register_args = (my_encoder, my_decoder, 'application/x-mimetype', 'utf-8') When this package is installed, the new 'my_serializer' serializer will be supported by Kombu. .. admonition:: Buffer Objects The decoder function of a custom serializer must support both strings and Python's old-style buffer objects. The Python pickle and json modules usually don't do this via their ``loads`` functions, but you can easily add support by making a wrapper around the ``load`` function that takes file objects instead of strings. Here's an example wrapping :func:`pickle.load` in such a way: .. code-block:: python import pickle from io import BytesIO from kombu import serialization def loads(s): return pickle.load(BytesIO(s)) serialization.register( 'my_pickle', pickle.dumps, loads, content_type='application/x-pickle2', content_encoding='binary', ) kombu-4.1.0/docs/userguide/producers.rst0000644000175000017500000000706213130603207020243 0ustar omeromer00000000000000.. _guide-producers: =========== Producers =========== .. _producer-basics: Basics ====== You can create a producer using a :class:`~kombu.Connection`: .. code-block:: pycon >>> producer = connection.Producer() You can also instantiate :class:`~kombu.Producer` directly; it takes a channel or a connection as an argument: .. code-block:: pycon >>> with Connection('amqp://') as conn: ... with conn.channel() as channel: ... producer = Producer(channel) Once you have a producer instance you can publish messages: .. code-block:: pycon >>> from kombu import Exchange >>> exchange = Exchange('name', type='direct') >>> producer.publish( ... {'hello': 'world'}, # message to send ... exchange=exchange, # destination exchange ... routing_key='rk', # destination routing key ... declare=[exchange], # make sure exchange is declared ... 
) Mostly you will be getting a connection from a connection pool, and this connection can be stale, or you could lose the connection in the middle of sending the message. Using retries is a good way to handle these intermittent failures: .. code-block:: pycon >>> producer.publish({'hello': 'world'}, ..., retry=True) In addition, a retry policy can be specified, which is a dictionary of parameters supported by the :func:`~kombu.utils.functional.retry_over_time` function: .. code-block:: pycon >>> producer.publish( ... {'hello': 'world'}, ..., ... retry=True, ... retry_policy={ ... 'interval_start': 0, # First retry immediately, ... 'interval_step': 2, # then increase by 2s for every retry. ... 'interval_max': 30, # but don't exceed 30s between retries. ... 'max_retries': 30, # give up after 30 tries. ... }, ... ) The ``declare`` argument lets you pass a list of entities that must be declared before sending the message. This is especially important when using the ``retry`` flag, since the broker may actually restart during a retry, in which case non-durable entities are removed. Say you are writing a task queue, and the workers may not have started yet, so the queues aren't declared. In this case you need to declare both the exchange and the queue so that the message is delivered to the queue while the workers are offline: .. code-block:: pycon >>> from kombu import Exchange, Queue >>> task_queue = Queue('tasks', Exchange('tasks'), routing_key='tasks') >>> producer.publish( ... {'hello': 'world'}, ..., ... retry=True, ... exchange=task_queue.exchange, ... routing_key=task_queue.routing_key, ... declare=[task_queue], # declares exchange, queue and binds. ... ) Bypassing routing by using the anon-exchange -------------------------------------------- You may deliver to a queue directly, bypassing the broker's routing mechanisms, by using the "anon-exchange": set the exchange parameter to the empty string, and set the routing key to be the name of the queue: .. code-block:: pycon >>> producer.publish( ... {'hello': 'world'}, ... exchange='', ... routing_key=task_queue.name, ... ) Serialization ============= JSON is the default serializer when a non-string object is passed to publish, but you can also specify a different serializer: .. code-block:: pycon >>> producer.publish({'hello': 'world'}, serializer='pickle') See :ref:`guide-serialization` for more information. Reference ========= .. autoclass:: kombu.Producer :noindex: :members: kombu-4.1.0/docs/faq.rst0000644000175000017500000000002413130603207014777 0ustar omeromer00000000000000.. include:: ../FAQ kombu-4.1.0/docs/Makefile0000644000175000017500000001751113130603207015147 0ustar omeromer00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
# the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " apicheck to verify that all modules are present in autodoc" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PROJ.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PROJ.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PROJ" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PROJ" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: apicheck apicheck: $(SPHINXBUILD) -b apicheck $(ALLSPHINXOPTS) $(BUILDDIR)/apicheck .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
kombu-4.1.0/docs/_templates/0000755000175000017500000000000013134154263015646 5ustar omeromer00000000000000kombu-4.1.0/docs/_templates/sidebardonations.html0000644000175000017500000000601213130603207022054 0ustar omeromer00000000000000 kombu-4.1.0/docs/_static/0000755000175000017500000000000013134154263015137 5ustar omeromer00000000000000kombu-4.1.0/docs/_static/.keep0000644000175000017500000000000013130603207016043 0ustar omeromer00000000000000kombu-4.1.0/docs/templates/0000755000175000017500000000000013134154263015507 5ustar omeromer00000000000000kombu-4.1.0/docs/templates/readme.txt0000644000175000017500000000233513130603207017501 0ustar omeromer00000000000000======================================== kombu - Messaging library for Python ======================================== |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| .. include:: ../includes/introduction.txt .. include:: ../includes/installation.txt .. include:: ../includes/resources.txt .. |build-status| image:: https://secure.travis-ci.org/celery/kombu.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/kombu .. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=master :target: https://codecov.io/github/celery/kombu?branch=master .. |license| image:: https://img.shields.io/pypi/l/kombu.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg :alt: Kombu can be installed via wheel :target: http://pypi.python.org/pypi/kombu/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg :alt: Supported Python versions. :target: http://pypi.python.org/pypi/kombu/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg :alt: Support Python implementations. :target: http://pypi.python.org/pypi/kombu/ -- kombu-4.1.0/README.rst0000644000175000017500000003030313134154022014240 0ustar omeromer00000000000000======================================== kombu - Messaging library for Python ======================================== |build-status| |coverage| |license| |wheel| |pyversion| |pyimp| :Version: 4.1.0 :Web: http://kombu.me/ :Download: http://pypi.python.org/pypi/kombu/ :Source: https://github.com/celery/kombu/ :Keywords: messaging, amqp, rabbitmq, redis, mongodb, python, queue About ===== `Kombu` is a messaging library for Python. The aim of `Kombu` is to make messaging in Python as easy as possible by providing an idiomatic high-level interface for the AMQ protocol, and also provide proven and tested solutions to common messaging problems. `AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol for message orientation, queuing, routing, reliability and security, for which the `RabbitMQ`_ messaging server is the most popular implementation. Features ======== * Allows application authors to support several message server solutions by using pluggable transports. * AMQP transport using the `py-amqp`_, `librabbitmq`_, or `qpid-python`_ libraries. * High performance AMQP transport written in C - when using `librabbitmq`_ This is automatically enabled if librabbitmq is installed: :: $ pip install librabbitmq * Virtual transports makes it really easy to add support for non-AMQP transports. There is already built-in support for `Redis`_, `Amazon SQS`_, `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. * In-memory transport for unit testing. * Supports automatic encoding, serialization and compression of message payloads. 
* Consistent exception handling across transports. * The ability to ensure that an operation is performed by gracefully handling connection and channel errors. * Several annoyances with `amqplib`_ have been fixed, like supporting timeouts and the ability to wait for events on more than one channel. * Projects already using `carrot`_ can easily be ported by using a compatibility layer. For an introduction to AMQP you should read the article `Rabbits and warrens`_, and the `Wikipedia article about AMQP`_. .. _`RabbitMQ`: https://www.rabbitmq.com/ .. _`AMQP`: https://amqp.org .. _`py-amqp`: https://pypi.python.org/pypi/amqp/ .. _`qpid-python`: https://pypi.python.org/pypi/qpid-python/ .. _`Redis`: https://redis.io .. _`Amazon SQS`: https://aws.amazon.com/sqs/ .. _`Zookeeper`: https://zookeeper.apache.org/ .. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ .. _`amqplib`: https://barryp.org/software/py-amqplib/ .. _`Wikipedia article about AMQP`: https://en.wikipedia.org/wiki/AMQP .. _`carrot`: https://pypi.python.org/pypi/carrot/ .. _`librabbitmq`: https://pypi.python.org/pypi/librabbitmq .. _`Pyro`: https://pythonhosted.org/Pyro4 .. _`SoftLayer MQ`: https://sldn.softlayer.com/reference/messagequeueapi .. _transport-comparison: Transport Comparison ==================== +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | **Client** | **Type** | **Direct** | **Topic** | **Fanout** | **Priority** | **TTL** | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *amqp* | Native | Yes | Yes | Yes | Yes [#f3]_ | Yes [#f4]_ | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *qpid* | Native | Yes | Yes | Yes | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *mongodb* | Virtual | Yes | Yes | Yes | Yes | Yes | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | Yes | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *in-memory* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ | *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | No | No | +---------------+----------+------------+------------+---------------+--------------+-----------------------+ .. [#f1] Declarations are only kept in memory, so exchanges/queues must be declared by all clients that need them. .. [#f2] Fanout supported via storing routing tables in SimpleDB. Disabled by default, but can be enabled by using the ``supports_fanout`` transport option. .. [#f3] AMQP Message priority support depends on broker implementation. .. [#f4] AMQP Message/Queue TTL support depends on broker implementation.
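As an illustrative sketch (hosts, ports and variable names here are examples only, not requirements), the transport from the table above is selected by the URL scheme given to ``Connection``:

.. code:: python

    from kombu import Connection

    # native AMQP transport (py-amqp, or librabbitmq when installed)
    amqp_conn = Connection('amqp://guest:guest@localhost:5672//')

    # Redis virtual transport
    redis_conn = Connection('redis://localhost:6379/0')

    # in-memory virtual transport, handy for unit tests
    memory_conn = Connection('memory://')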
Documentation ------------- Kombu uses Sphinx; the latest documentation can be found here: https://kombu.readthedocs.io/ Quick overview -------------- .. code:: python from kombu import Connection, Exchange, Queue media_exchange = Exchange('media', 'direct', durable=True) video_queue = Queue('video', exchange=media_exchange, routing_key='video') def process_media(body, message): print(body) message.ack() # connections with Connection('amqp://guest:guest@localhost//') as conn: # produce producer = conn.Producer(serializer='json') producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, exchange=media_exchange, routing_key='video', declare=[video_queue]) # the declare above makes sure the video queue is declared, # so that the messages can be delivered. # It's a best practice in Kombu to have both publishers and # consumers declare the queue. You can also declare the # queue manually using: # video_queue(conn).declare() # consume with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: # Process messages and handle events on all channels while True: conn.drain_events() # Consume from several queues on the same channel: video_queue = Queue('video', exchange=media_exchange, routing_key='video') image_queue = Queue('image', exchange=media_exchange, routing_key='image') with conn.Consumer([video_queue, image_queue], callbacks=[process_media]) as consumer: while True: conn.drain_events() Or handle channels manually: .. code:: python with connection.channel() as channel: producer = Producer(channel, ...) consumer = Consumer(channel, ...) All objects can be used outside of with statements too, just remember to close the objects after use: .. code:: python from kombu import Connection, Consumer, Producer connection = Connection() # ... connection.release() consumer = Consumer(channel_or_connection, ...) consumer.register_callback(my_callback) consumer.consume() # .... consumer.cancel() `Exchange` and `Queue` are simply declarations that can be pickled and used in configuration files etc. They also support operations, but to do so they need to be bound to a channel. Binding exchanges and queues to a connection will make it use that connection's default channel. :: >>> exchange = Exchange('tasks', 'direct') >>> connection = Connection() >>> bound_exchange = exchange(connection) >>> bound_exchange.delete() # the original exchange is not affected, and stays unbound. >>> exchange.delete() raise NotBoundError: Can't call delete on Exchange not bound to a channel. Terminology =========== There are some concepts you should be familiar with before starting: * Producers Producers send messages to an exchange. * Exchanges Messages are sent to exchanges. Exchanges are named and can be configured to use one of several routing algorithms. The exchange routes the messages to consumers by matching the routing key in the message with the routing key the consumer provides when binding to the exchange. * Consumers Consumers declare a queue, bind it to an exchange and receive messages from it. * Queues Queues receive messages sent to exchanges. The queues are declared by consumers. * Routing keys Every message has a routing key. The interpretation of the routing key depends on the exchange type. There are four default exchange types defined by the AMQP standard, and vendors can define custom types (so see your vendor's manual for details). These are the default exchange types defined by AMQP/0.8, with a Kombu sketch shown after this list: * Direct exchange Matches if the routing key property of the message and the `routing_key` attribute of the consumer are identical. * Fan-out exchange Always matches, even if the binding does not have a routing key. * Topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available: star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`. * Headers exchange Matches on the message's header attributes, using the arguments given when binding to the exchange, instead of the routing key.
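A small illustrative sketch of the exchange types above (the exchange and queue names are examples only):

.. code:: python

    from kombu import Exchange, Queue

    # direct exchange: consumers bind using the exact routing key
    video_queue = Queue('video', Exchange('media', type='direct'),
                        routing_key='video')

    # topic exchange: consumers bind using a wildcard pattern
    stock_queue = Queue('stocks', Exchange('market', type='topic'),
                        routing_key='*.stock.#')

    # fanout exchange: every bound queue gets a copy; the key is ignored
    event_queue = Queue('events-1', Exchange('events', type='fanout'))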
Installation ============ You can install `Kombu` either via the Python Package Index (PyPI) or from source. To install using `pip`: :: $ pip install kombu To install using `easy_install`: :: $ easy_install kombu If you have downloaded a source tarball you can install it by doing the following: :: $ python setup.py build # python setup.py install # as root Getting Help ============ Mailing list ------------ Join the `carrot-users`_ mailing list. .. _`carrot-users`: https://groups.google.com/group/carrot-users/ Bug tracker =========== If you have any suggestions, bug reports or annoyances please report them to our issue tracker at https://github.com/celery/kombu/issues/ Contributing ============ Development of `Kombu` happens at GitHub: https://github.com/celery/kombu You are highly encouraged to participate in the development. If you don't like GitHub (for some reason) you're welcome to send regular patches. License ======= This software is licensed under the `New BSD License`. See the `LICENSE` file in the top distribution directory for the full license text. .. |build-status| image:: https://secure.travis-ci.org/celery/kombu.png?branch=master :alt: Build status :target: https://travis-ci.org/celery/kombu .. |coverage| image:: https://codecov.io/github/celery/kombu/coverage.svg?branch=master :target: https://codecov.io/github/celery/kombu?branch=master .. |license| image:: https://img.shields.io/pypi/l/kombu.svg :alt: BSD License :target: https://opensource.org/licenses/BSD-3-Clause .. |wheel| image:: https://img.shields.io/pypi/wheel/kombu.svg :alt: Kombu can be installed via wheel :target: https://pypi.python.org/pypi/kombu/ .. |pyversion| image:: https://img.shields.io/pypi/pyversions/kombu.svg :alt: Supported Python versions. :target: https://pypi.python.org/pypi/kombu/ .. |pyimp| image:: https://img.shields.io/pypi/implementation/kombu.svg :alt: Supported Python implementations.
:target: https://pypi.python.org/pypi/kombu/ -- kombu-4.1.0/kombu/0000755000175000017500000000000013134154263013676 5ustar omeromer00000000000000kombu-4.1.0/kombu/connection.py0000644000175000017500000010620713130603207016406 0ustar omeromer00000000000000"""Client (Connection).""" from __future__ import absolute_import, unicode_literals import os import socket import sys from collections import OrderedDict from contextlib import contextmanager from itertools import count, cycle from operator import itemgetter # jython breaks on relative import for .exceptions for some reason # (Issue #112) from kombu import exceptions from .five import ( bytes_if_py2, python_2_unicode_compatible, reraise, string_t, text_t, ) from .log import get_logger from .resource import Resource from .transport import get_transport_cls, supports_librabbitmq from .utils.collections import HashedSeq from .utils.functional import dictfilter, lazy, retry_over_time, shufflecycle from .utils.objects import cached_property from .utils.url import as_url, parse_url, quote, urlparse __all__ = ['Connection', 'ConnectionPool', 'ChannelPool'] logger = get_logger(__name__) roundrobin_failover = cycle resolve_aliases = { 'pyamqp': 'amqp', 'librabbitmq': 'amqp', } failover_strategies = { 'round-robin': roundrobin_failover, 'shuffle': shufflecycle, } _log_connection = os.environ.get('KOMBU_LOG_CONNECTION', False) _log_channel = os.environ.get('KOMBU_LOG_CHANNEL', False) @python_2_unicode_compatible class Connection(object): """A connection to the broker. Example: >>> Connection('amqp://guest:guest@localhost:5672//') >>> Connection('amqp://foo;amqp://bar', ... failover_strategy='round-robin') >>> Connection('redis://', transport_options={ ... 'visibility_timeout': 3000, ... }) >>> import ssl >>> Connection('amqp://', login_method='EXTERNAL', ssl={ ... 'ca_certs': '/etc/pki/tls/certs/something.crt', ... 'keyfile': '/etc/something/system.key', ... 'certfile': '/etc/something/system.cert', ... 'cert_reqs': ssl.CERT_REQUIRED, ... }) Note: SSL currently only works with the py-amqp, and qpid transports. For other transports you can use stunnel. Arguments: URL (str, Sequence): Broker URL, or a list of URLs. Keyword Arguments: ssl (bool): Use SSL to connect to the server. Default is ``False``. May not be supported by the specified transport. transport (Transport): Default transport if not specified in the URL. connect_timeout (float): Timeout in seconds for connecting to the server. May not be supported by the specified transport. transport_options (Dict): A dict of additional connection arguments to pass to alternate kombu channel implementations. Consult the transport documentation for available options. heartbeat (float): Heartbeat interval in int/float seconds. Note that if heartbeats are enabled then the :meth:`heartbeat_check` method must be called regularly, around once per second. Note: The connection is established lazily when needed. If you need the connection to be established, then force it by calling :meth:`connect`:: >>> conn = Connection('amqp://') >>> conn.connect() and always remember to close the connection:: >>> conn.release() These options have been replaced by the URL argument, but are still supported for backwards compatibility: :keyword hostname: Host name/address. NOTE: You cannot specify both the URL argument and use the hostname keyword argument at the same time. :keyword userid: Default user name if not provided in the URL. :keyword password: Default password if not provided in the URL. 
:keyword virtual_host: Default virtual host if not provided in the URL. :keyword port: Default port if not provided in the URL. """ port = None virtual_host = '/' connect_timeout = 5 _closed = None _connection = None _default_channel = None _transport = None _logger = False uri_prefix = None #: The cache of declared entities is per connection, #: in case the server loses data. declared_entities = None #: Iterator returning the next broker URL to try in the event #: of connection failure (initialized by :attr:`failover_strategy`). cycle = None #: Additional transport specific options, #: passed on to the transport instance. transport_options = None #: Strategy used to select new hosts when reconnecting after connection #: failure. One of "round-robin", "shuffle" or any custom iterator #: constantly yielding new URLs to try. failover_strategy = 'round-robin' #: Heartbeat value, currently only supported by the py-amqp transport. heartbeat = None resolve_aliases = resolve_aliases failover_strategies = failover_strategies hostname = userid = password = ssl = login_method = None def __init__(self, hostname='localhost', userid=None, password=None, virtual_host=None, port=None, insist=False, ssl=False, transport=None, connect_timeout=5, transport_options=None, login_method=None, uri_prefix=None, heartbeat=0, failover_strategy='round-robin', alternates=None, **kwargs): alt = [] if alternates is None else alternates # have to spell the args out, just to get nice docstrings :( params = self._initial_params = { 'hostname': hostname, 'userid': userid, 'password': password, 'virtual_host': virtual_host, 'port': port, 'insist': insist, 'ssl': ssl, 'transport': transport, 'connect_timeout': connect_timeout, 'login_method': login_method, 'heartbeat': heartbeat } if hostname and not isinstance(hostname, string_t): alt.extend(hostname) hostname = alt[0] if hostname and '://' in hostname: if ';' in hostname: alt.extend(hostname.split(';')) hostname = alt[0] if '+' in hostname[:hostname.index('://')]: # e.g. sqla+mysql://root:masterkey@localhost/ params['transport'], params['hostname'] = \ hostname.split('+', 1) transport = self.uri_prefix = params['transport'] else: transport = transport or urlparse(hostname).scheme if not get_transport_cls(transport).can_parse_url: # we must parse the URL url_params = parse_url(hostname) params.update( dictfilter(url_params), hostname=url_params['hostname'], ) params['transport'] = transport self._init_params(**params) # fallback hosts self.alt = alt # keep text representation for .info # only temporary solution as this won't work when # passing a custom object (Issue celery/celery#3320). self._failover_strategy = failover_strategy or 'round-robin' self.failover_strategy = self.failover_strategies.get( self._failover_strategy) or self._failover_strategy if self.alt: self.cycle = self.failover_strategy(self.alt) next(self.cycle) # skip first entry if transport_options is None: transport_options = {} self.transport_options = transport_options if _log_connection: # pragma: no cover self._logger = True if uri_prefix: self.uri_prefix = uri_prefix self.declared_entities = set() def switch(self, url): """Switch connection parameters to use a new URL. Note: Does not reconnect! 
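Example:
    Illustrative sketch; any valid broker URL can be given:

    >>> conn = Connection('amqp://guest:guest@localhost:5672//')
    >>> conn.switch('redis://localhost:6379/0')
    >>> conn.connect()  # switch does not reconnect, so do it explicitly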
""" self.close() self.declared_entities.clear() self._closed = False self._init_params(**dict(self._initial_params, **parse_url(url))) def maybe_switch_next(self): """Switch to next URL given by the current failover strategy.""" if self.cycle: self.switch(next(self.cycle)) def _init_params(self, hostname, userid, password, virtual_host, port, insist, ssl, transport, connect_timeout, login_method, heartbeat): transport = transport or 'amqp' if transport == 'amqp' and supports_librabbitmq(): transport = 'librabbitmq' self.hostname = hostname self.userid = userid self.password = password self.login_method = login_method self.virtual_host = virtual_host or self.virtual_host self.port = port or self.port self.insist = insist self.connect_timeout = connect_timeout self.ssl = ssl self.transport_cls = transport self.heartbeat = heartbeat and float(heartbeat) def register_with_event_loop(self, loop): self.transport.register_with_event_loop(self.connection, loop) def _debug(self, msg, *args, **kwargs): if self._logger: # pragma: no cover fmt = '[Kombu connection:{id:#x}] {msg}' logger.debug(fmt.format(id=id(self), msg=text_t(msg)), *args, **kwargs) def connect(self): """Establish connection to server immediately.""" self._closed = False return self.connection def channel(self): """Create and return a new channel.""" self._debug('create channel') chan = self.transport.create_channel(self.connection) if _log_channel: # pragma: no cover from .utils.debug import Logwrapped return Logwrapped(chan, 'kombu.channel', '[Kombu channel:{0.channel_id}] ') return chan def heartbeat_check(self, rate=2): """Check heartbeats. Allow the transport to perform any periodic tasks required to make heartbeats work. This should be called approximately every second. If the current transport does not support heartbeats then this is a noop operation. Arguments: rate (int): Rate is how often the tick is called compared to the actual heartbeat value. E.g. if the heartbeat is set to 3 seconds, and the tick is called every 3 / 2 seconds, then the rate is 2. This value is currently unused by any transports. """ return self.transport.heartbeat_check(self.connection, rate=rate) def drain_events(self, **kwargs): """Wait for a single event from the server. Arguments: timeout (float): Timeout in seconds before we give up. Raises: socket.timeout: if the timeout is exceeded. """ return self.transport.drain_events(self.connection, **kwargs) def maybe_close_channel(self, channel): """Close given channel, but ignore connection and channel errors.""" try: channel.close() except (self.connection_errors + self.channel_errors): pass def _do_close_self(self): # Close only connection and channel(s), but not transport. 
self.declared_entities.clear() if self._default_channel: self.maybe_close_channel(self._default_channel) if self._connection: try: self.transport.close_connection(self._connection) except self.connection_errors + (AttributeError, socket.error): pass self._connection = None def _close(self): """Really close connection, even if part of a connection pool.""" self._do_close_self() self._do_close_transport() self._debug('closed') self._closed = True def _do_close_transport(self): if self._transport: self._transport.client = None self._transport = None def collect(self, socket_timeout=None): # amqp requires communication to close, we don't need that just # to clear out references, Transport._collect can also be implemented # by other transports that want fast after fork try: gc_transport = self._transport._collect except AttributeError: _timeo = socket.getdefaulttimeout() socket.setdefaulttimeout(socket_timeout) try: self._do_close_self() except socket.timeout: pass finally: socket.setdefaulttimeout(_timeo) else: gc_transport(self._connection) self._do_close_transport() self.declared_entities.clear() self._connection = None def release(self): """Close the connection (if open).""" self._close() close = release def ensure_connection(self, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30, callback=None, reraise_as_library_errors=True): """Ensure we have a connection to the server. If not retry establishing the connection with the settings specified. Arguments: errback (Callable): Optional callback called each time the connection can't be established. Arguments provided are the exception raised and the interval that will be slept ``(exc, interval)``. max_retries (int): Maximum number of times to retry. If this limit is exceeded the connection error will be re-raised. interval_start (float): The number of seconds we start sleeping for. interval_step (float): How many seconds added to the interval for each retry. interval_max (float): Maximum number of seconds to sleep between each retry. callback (Callable): Optional callback that is called for every internal iteration (1 s). 
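Example:
    Illustrative sketch; assumes the broker may be temporarily down:

    >>> def errback(exc, interval):
    ...     print('Broker error: %r, retrying in %ss' % (exc, interval))
    >>> conn = Connection('amqp://')
    >>> conn.ensure_connection(errback=errback, max_retries=5)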
""" def on_error(exc, intervals, retries, interval=0): round = self.completes_cycle(retries) if round: interval = next(intervals) if errback: errback(exc, interval) self.maybe_switch_next() # select next host return interval if round else 0 ctx = self._reraise_as_library_errors if not reraise_as_library_errors: ctx = self._dummy_context with ctx(): retry_over_time(self.connect, self.recoverable_connection_errors, (), {}, on_error, max_retries, interval_start, interval_step, interval_max, callback) return self @contextmanager def _reraise_as_library_errors( self, ConnectionError=exceptions.OperationalError, ChannelError=exceptions.OperationalError): try: yield except (ConnectionError, ChannelError): raise except self.recoverable_connection_errors as exc: reraise(ConnectionError, ConnectionError(text_t(exc)), sys.exc_info()[2]) except self.recoverable_channel_errors as exc: reraise(ChannelError, ChannelError(text_t(exc)), sys.exc_info()[2]) @contextmanager def _dummy_context(self): yield def completes_cycle(self, retries): """Return true if the cycle is complete after number of `retries`.""" return not (retries + 1) % len(self.alt) if self.alt else True def revive(self, new_channel): """Revive connection after connection re-established.""" if self._default_channel and new_channel is not self._default_channel: self.maybe_close_channel(self._default_channel) self._default_channel = None def _default_ensure_callback(self, exc, interval): logger.error("Ensure: Operation error: %r. Retry in %ss", exc, interval, exc_info=True) def ensure(self, obj, fun, errback=None, max_retries=None, interval_start=1, interval_step=1, interval_max=1, on_revive=None): """Ensure operation completes. Regardless of any channel/connection errors occurring. Retries by establishing the connection, and reapplying the function. Arguments: fun (Callable): Method to apply. errback (Callable): Optional callback called each time the connection can't be established. Arguments provided are the exception raised and the interval that will be slept ``(exc, interval)``. max_retries (int): Maximum number of times to retry. If this limit is exceeded the connection error will be re-raised. interval_start (float): The number of seconds we start sleeping for. interval_step (float): How many seconds added to the interval for each retry. interval_max (float): Maximum number of seconds to sleep between each retry. Examples: >>> from kombu import Connection, Producer >>> conn = Connection('amqp://') >>> producer = Producer(conn) >>> def errback(exc, interval): ... logger.error('Error: %r', exc, exc_info=1) ... logger.info('Retry in %s seconds.', interval) >>> publish = conn.ensure(producer, producer.publish, ... errback=errback, max_retries=3) >>> publish({'hello': 'world'}, routing_key='dest') """ def _ensured(*args, **kwargs): got_connection = 0 conn_errors = self.recoverable_connection_errors chan_errors = self.recoverable_channel_errors has_modern_errors = hasattr( self.transport, 'recoverable_connection_errors', ) with self._reraise_as_library_errors(): for retries in count(0): # for infinity try: return fun(*args, **kwargs) except conn_errors as exc: if got_connection and not has_modern_errors: # transport can not distinguish between # recoverable/irrecoverable errors, so we propagate # the error if it persists after a new connection # was successfully established. 
raise if max_retries is not None and retries > max_retries: raise self._debug('ensure connection error: %r', exc, exc_info=1) self.collect() errback and errback(exc, 0) remaining_retries = None if max_retries is not None: remaining_retries = max(max_retries - retries, 1) self.ensure_connection( errback, remaining_retries, interval_start, interval_step, interval_max, reraise_as_library_errors=False, ) channel = self.default_channel obj.revive(channel) if on_revive: on_revive(channel) got_connection += 1 except chan_errors as exc: if max_retries is not None and retries > max_retries: raise self._debug('ensure channel error: %r', exc, exc_info=1) errback and errback(exc, 0) _ensured.__name__ = bytes_if_py2('{0}(ensured)'.format(fun.__name__)) _ensured.__doc__ = fun.__doc__ _ensured.__module__ = fun.__module__ return _ensured def autoretry(self, fun, channel=None, **ensure_options): """Decorator for functions supporting a ``channel`` keyword argument. The resulting callable will retry calling the function if it raises connection or channel related errors. The return value will be a tuple of ``(retval, last_created_channel)``. If a ``channel`` is not provided, then one will be automatically acquired (remember to close it afterwards). See Also: :meth:`ensure` for the full list of supported keyword arguments. Example: >>> channel = connection.channel() >>> try: ... ret, channel = connection.autoretry( ... publish_messages, channel) ... finally: ... channel.close() """ channels = [channel] class Revival(object): __name__ = getattr(fun, '__name__', None) __module__ = getattr(fun, '__module__', None) __doc__ = getattr(fun, '__doc__', None) def __init__(self, connection): self.connection = connection def revive(self, channel): channels[0] = channel def __call__(self, *args, **kwargs): if channels[0] is None: self.revive(self.connection.default_channel) return fun(*args, channel=channels[0], **kwargs), channels[0] revive = Revival(self) return self.ensure(revive, revive, **ensure_options) def create_transport(self): return self.get_transport_cls()(client=self) def get_transport_cls(self): """Get the currently used transport class.""" transport_cls = self.transport_cls if not transport_cls or isinstance(transport_cls, string_t): transport_cls = get_transport_cls(transport_cls) return transport_cls def clone(self, **kwargs): """Create a copy of the connection with same settings.""" return self.__class__(**dict(self._info(resolve=False), **kwargs)) def get_heartbeat_interval(self): return self.transport.get_heartbeat_interval(self.connection) def _info(self, resolve=True): transport_cls = self.transport_cls if resolve: transport_cls = self.resolve_aliases.get( transport_cls, transport_cls) D = self.transport.default_connection_params hostname = self.hostname or D.get('hostname') if self.uri_prefix: hostname = '%s+%s' % (self.uri_prefix, hostname) info = ( ('hostname', hostname), ('userid', self.userid or D.get('userid')), ('password', self.password or D.get('password')), ('virtual_host', self.virtual_host or D.get('virtual_host')), ('port', self.port or D.get('port')), ('insist', self.insist), ('ssl', self.ssl), ('transport', transport_cls), ('connect_timeout', self.connect_timeout), ('transport_options', self.transport_options), ('login_method', self.login_method or D.get('login_method')), ('uri_prefix', self.uri_prefix), ('heartbeat', self.heartbeat), ('failover_strategy', self._failover_strategy), ('alternates', self.alt), ) return info def info(self): """Get connection info.""" return 
OrderedDict(self._info()) def __eqhash__(self): return HashedSeq(self.transport_cls, self.hostname, self.userid, self.password, self.virtual_host, self.port, repr(self.transport_options)) def as_uri(self, include_password=False, mask='**', getfields=itemgetter('port', 'userid', 'password', 'virtual_host', 'transport')): """Convert connection parameters to URL form.""" hostname = self.hostname or 'localhost' if self.transport.can_parse_url: if self.uri_prefix: return '%s+%s' % (self.uri_prefix, hostname) return self.hostname if self.uri_prefix: return '%s+%s' % (self.uri_prefix, hostname) fields = self.info() port, userid, password, vhost, transport = getfields(fields) return as_url( transport, hostname, port, userid, password, quote(vhost), sanitize=not include_password, mask=mask, ) def Pool(self, limit=None, **kwargs): """Pool of connections. See Also: :class:`ConnectionPool`. Arguments: limit (int): Maximum number of active connections. Default is no limit. Example: >>> connection = Connection('amqp://') >>> pool = connection.Pool(2) >>> c1 = pool.acquire() >>> c2 = pool.acquire() >>> c3 = pool.acquire() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "kombu/connection.py", line 354, in acquire raise ConnectionLimitExceeded(self.limit) kombu.exceptions.ConnectionLimitExceeded: 2 >>> c1.release() >>> c3 = pool.acquire() """ return ConnectionPool(self, limit, **kwargs) def ChannelPool(self, limit=None, **kwargs): """Pool of channels. See Also: :class:`ChannelPool`. Arguments: limit (int): Maximum number of active channels. Default is no limit. Example: >>> connection = Connection('amqp://') >>> pool = connection.ChannelPool(2) >>> c1 = pool.acquire() >>> c2 = pool.acquire() >>> c3 = pool.acquire() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "kombu/connection.py", line 354, in acquire raise ChannelLimitExceeded(self.limit) kombu.exceptions.ChannelLimitExceeded: 2 >>> c1.release() >>> c3 = pool.acquire() """ return ChannelPool(self, limit, **kwargs) def Producer(self, channel=None, *args, **kwargs): """Create new :class:`kombu.Producer` instance.""" from .messaging import Producer return Producer(channel or self, *args, **kwargs) def Consumer(self, queues=None, channel=None, *args, **kwargs): """Create new :class:`kombu.Consumer` instance.""" from .messaging import Consumer return Consumer(channel or self, queues, *args, **kwargs) def SimpleQueue(self, name, no_ack=None, queue_opts=None, exchange_opts=None, channel=None, **kwargs): """Simple persistent queue API. Create new :class:`~kombu.simple.SimpleQueue`, using a channel from this connection. If ``name`` is a string, a queue and exchange will be automatically created using that name as the name of the queue and exchange; it will also be used as the default routing key. Arguments: name (str, kombu.Queue): Name of the queue, or a queue instance. no_ack (bool): Disable acknowledgments. Default is false. queue_opts (Dict): Additional keyword arguments passed to the constructor of the automatically created :class:`~kombu.Queue`. exchange_opts (Dict): Additional keyword arguments passed to the constructor of the automatically created :class:`~kombu.Exchange`. channel (ChannelT): Custom channel to use. If not specified the connection default channel is used. """ from .simple import SimpleQueue return SimpleQueue(channel or self, name, no_ack, queue_opts, exchange_opts, **kwargs) def SimpleBuffer(self, name, no_ack=None, queue_opts=None, exchange_opts=None, channel=None, **kwargs): """Simple ephemeral queue API.
Create new :class:`~kombu.simple.SimpleQueue` using a channel from this connection. See Also: Same as :meth:`SimpleQueue`, but configured with buffering semantics. The resulting queue and exchange will not be durable, and auto delete is enabled. Messages will be transient (not persistent), and acknowledgments are disabled (``no_ack``). """ from .simple import SimpleBuffer return SimpleBuffer(channel or self, name, no_ack, queue_opts, exchange_opts, **kwargs) def _establish_connection(self): self._debug('establishing connection...') conn = self.transport.establish_connection() self._debug('connection established: %r', self) return conn def supports_exchange_type(self, exchange_type): return exchange_type in self.transport.implements.exchange_type def __repr__(self): return '<Connection: {0} at {1:#x}>'.format(self.as_uri(), id(self)) def __copy__(self): return self.clone() def __reduce__(self): return self.__class__, tuple(self.info().values()), None def __enter__(self): return self def __exit__(self, *args): self.release() @property def qos_semantics_matches_spec(self): return self.transport.qos_semantics_matches_spec(self.connection) @property def connected(self): """Return true if the connection has been established.""" return (not self._closed and self._connection is not None and self.transport.verify_connection(self._connection)) @property def connection(self): """The underlying connection object. Warning: This instance is transport specific, so do not depend on the interface of this object. """ if not self._closed: if not self.connected: self.declared_entities.clear() self._default_channel = None self._connection = self._establish_connection() self._closed = False return self._connection @property def default_channel(self): """Default channel. Created upon access and closed when the connection is closed. Note: Can be used for automatic channel handling when you only need one channel, and also it is the channel implicitly used if a connection is passed instead of a channel, to functions that require a channel. """ # make sure we're still connected, and if not refresh. self.ensure_connection() if self._default_channel is None: self._default_channel = self.channel() return self._default_channel @property def host(self): """The host as a host name/port pair separated by colon.""" return ':'.join([self.hostname, str(self.port)]) @property def transport(self): if self._transport is None: self._transport = self.create_transport() return self._transport @cached_property def manager(self): """AMQP Management API. Experimental manager that can be used to manage/monitor the broker instance. Not available for all transports. """ return self.transport.manager def get_manager(self, *args, **kwargs): return self.transport.get_manager(*args, **kwargs) @cached_property def recoverable_connection_errors(self): """Recoverable connection errors. List of connection related exceptions that can be recovered from, but where the connection must be closed and re-established first. """ try: return self.transport.recoverable_connection_errors except AttributeError: # There was no such classification before, # and all errors were assumed to be recoverable, # so this is a fallback for transports that do # not support the new recoverable/irrecoverable classes. return self.connection_errors + self.channel_errors @cached_property def recoverable_channel_errors(self): """Recoverable channel errors. List of channel related exceptions that can be automatically recovered from without re-establishing the connection.
""" try: return self.transport.recoverable_channel_errors except AttributeError: return () @cached_property def connection_errors(self): """List of exceptions that may be raised by the connection.""" return self.transport.connection_errors @cached_property def channel_errors(self): """List of exceptions that may be raised by the channel.""" return self.transport.channel_errors @property def supports_heartbeats(self): return self.transport.implements.heartbeats @property def is_evented(self): return self.transport.implements.async BrokerConnection = Connection # noqa: E305 class ConnectionPool(Resource): """Pool of connections.""" LimitExceeded = exceptions.ConnectionLimitExceeded close_after_fork = True def __init__(self, connection, limit=None, **kwargs): self.connection = connection super(ConnectionPool, self).__init__(limit=limit) def new(self): return self.connection.clone() def release_resource(self, resource): try: resource._debug('released') except AttributeError: pass def close_resource(self, resource): resource._close() def collect_resource(self, resource, socket_timeout=0.1): if not isinstance(resource, lazy): return resource.collect(socket_timeout) @contextmanager def acquire_channel(self, block=False): with self.acquire(block=block) as connection: yield connection, connection.default_channel def setup(self): if self.limit: q = self._resource.queue while len(q) < self.limit: self._resource.put_nowait(lazy(self.new)) def prepare(self, resource): if callable(resource): resource = resource() resource._debug('acquired') return resource class ChannelPool(Resource): """Pool of channels.""" LimitExceeded = exceptions.ChannelLimitExceeded def __init__(self, connection, limit=None, **kwargs): self.connection = connection super(ChannelPool, self).__init__(limit=limit) def new(self): return lazy(self.connection.channel) def setup(self): channel = self.new() if self.limit: q = self._resource.queue while len(q) < self.limit: self._resource.put_nowait(lazy(channel)) def prepare(self, channel): if callable(channel): channel = channel() return channel def maybe_channel(channel): """Get channel from object. Return the default channel if argument is a connection instance, otherwise just return the channel given. 
""" if is_connection(channel): return channel.default_channel return channel def is_connection(obj): return isinstance(obj, Connection) kombu-4.1.0/kombu/exceptions.py0000644000175000017500000000452413130603207016427 0ustar omeromer00000000000000"""Exceptions.""" from __future__ import absolute_import, unicode_literals from socket import timeout as TimeoutError # noqa from amqp import ChannelError, ConnectionError, ResourceError from kombu.five import python_2_unicode_compatible __all__ = [ 'KombuError', 'OperationalError', 'NotBoundError', 'MessageStateError', 'TimeoutError', 'LimitExceeded', 'ConnectionLimitExceeded', 'ChannelLimitExceeded', 'ConnectionError', 'ChannelError', 'VersionMismatch', 'SerializerNotInstalled', 'ResourceError', 'SerializationError', 'EncodeError', 'DecodeError', 'HttpError', 'InconsistencyError', ] class KombuError(Exception): """Common subclass for all Kombu exceptions.""" class OperationalError(KombuError): """Recoverable message transport connection error.""" class SerializationError(KombuError): """Failed to serialize/deserialize content.""" class EncodeError(SerializationError): """Cannot encode object.""" class DecodeError(SerializationError): """Cannot decode object.""" class NotBoundError(KombuError): """Trying to call channel dependent method on unbound entity.""" class MessageStateError(KombuError): """The message has already been acknowledged.""" class LimitExceeded(KombuError): """Limit exceeded.""" class ConnectionLimitExceeded(LimitExceeded): """Maximum number of simultaneous connections exceeded.""" class ChannelLimitExceeded(LimitExceeded): """Maximum number of simultaneous channels exceeded.""" class VersionMismatch(KombuError): """Library dependency version mismatch.""" class SerializerNotInstalled(KombuError): """Support for the requested serialization type is not installed.""" class ContentDisallowed(SerializerNotInstalled): """Consumer does not allow this content-type.""" class InconsistencyError(ConnectionError): """Data or environment has been found to be inconsistent. Depending on the cause it may be possible to retry the operation. """ @python_2_unicode_compatible class HttpError(Exception): """HTTP Client Error.""" def __init__(self, code, message=None, response=None): self.code = code self.message = message self.response = response super(HttpError, self).__init__(code, message, response) def __str__(self): return 'HTTP {0.code}: {0.message}'.format(self) kombu-4.1.0/kombu/compression.py0000644000175000017500000000405413130603207016605 0ustar omeromer00000000000000"""Compression utilities.""" from __future__ import absolute_import, unicode_literals from kombu.utils.encoding import ensure_bytes import zlib _aliases = {} _encoders = {} _decoders = {} __all__ = ['register', 'encoders', 'get_encoder', 'get_decoder', 'compress', 'decompress'] def register(encoder, decoder, content_type, aliases=[]): """Register new compression method. Arguments: encoder (Callable): Function used to compress text. decoder (Callable): Function used to decompress previously compressed text. content_type (str): The mime type this compression method identifies as. aliases (Sequence[str]): A list of names to associate with this compression method. 
""" _encoders[content_type] = encoder _decoders[content_type] = decoder _aliases.update((alias, content_type) for alias in aliases) def encoders(): """Return a list of available compression methods.""" return list(_encoders) def get_encoder(t): """Get encoder by alias name.""" t = _aliases.get(t, t) return _encoders[t], t def get_decoder(t): """Get decoder by alias name.""" return _decoders[_aliases.get(t, t)] def compress(body, content_type): """Compress text. Arguments: body (AnyStr): The text to compress. content_type (str): mime-type of compression method to use. """ encoder, content_type = get_encoder(content_type) return encoder(ensure_bytes(body)), content_type def decompress(body, content_type): """Decompress compressed text. Arguments: body (AnyStr): Previously compressed text to uncompress. content_type (str): mime-type of compression method used. """ return get_decoder(content_type)(body) register(zlib.compress, zlib.decompress, 'application/x-gzip', aliases=['gzip', 'zlib']) try: import bz2 except ImportError: pass # Jython? else: register(bz2.compress, bz2.decompress, 'application/x-bz2', aliases=['bzip2', 'bzip']) kombu-4.1.0/kombu/clocks.py0000644000175000017500000001106613130603207015523 0ustar omeromer00000000000000"""Logical Clocks and Synchronization.""" from __future__ import absolute_import, unicode_literals from threading import Lock from itertools import islice from operator import itemgetter from .five import python_2_unicode_compatible, zip __all__ = ['LamportClock', 'timetuple'] R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})' @python_2_unicode_compatible class timetuple(tuple): """Tuple of event clock information. Can be used as part of a heap to keep events ordered. Arguments: clock (int): Event clock value. timestamp (float): Event UNIX timestamp value. id (str): Event host id (e.g. ``hostname:pid``). obj (Any): Optional obj to associate with this event. """ __slots__ = () def __new__(cls, clock, timestamp, id, obj=None): return tuple.__new__(cls, (clock, timestamp, id, obj)) def __repr__(self): return R_CLOCK.format(*self) def __getnewargs__(self): return tuple(self) def __lt__(self, other): # 0: clock 1: timestamp 3: process id try: A, B = self[0], other[0] # uses logical clock value first if A and B: # use logical clock if available if A == B: # equal clocks use lower process id return self[2] < other[2] return A < B return self[1] < other[1] # ... or use timestamp except IndexError: return NotImplemented def __gt__(self, other): return other < self def __le__(self, other): return not other < self def __ge__(self, other): return not self < other clock = property(itemgetter(0)) timestamp = property(itemgetter(1)) id = property(itemgetter(2)) obj = property(itemgetter(3)) @python_2_unicode_compatible class LamportClock(object): """Lamport's logical clock. From Wikipedia: A Lamport logical clock is a monotonically incrementing software counter maintained in each process. It follows some simple rules: * A process increments its counter before each event in that process; * When a process sends a message, it includes its counter value with the message; * On receiving a message, the receiver process sets its counter to be greater than the maximum of its own value and the received value before it considers the message received. Conceptually, this logical clock can be thought of as a clock that only has meaning in relation to messages moving between processes. When a process receives a message, it resynchronizes its logical clock with the sender. 
See Also: * `Lamport timestamps`_ * `Lamport's distributed mutex`_ .. _`Lamport Timestamps`: https://en.wikipedia.org/wiki/Lamport_timestamps .. _`Lamport's distributed mutex`: https://bit.ly/p99ybE *Usage* When sending a message use :meth:`forward` to increment the clock, when receiving a message use :meth:`adjust` to sync with the time stamp of the incoming message. """ #: The clock's current value. value = 0 def __init__(self, initial_value=0, Lock=Lock): self.value = initial_value self.mutex = Lock() def adjust(self, other): with self.mutex: value = self.value = max(self.value, other) + 1 return value def forward(self): with self.mutex: self.value += 1 return self.value def sort_heap(self, h): """Sort heap of events. List of tuples containing at least two elements, representing an event, where the first element is the event's scalar clock value, and the second element is the id of the process (usually ``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])`` The list must already be sorted, which is why we refer to it as a heap. The tuple will not be unpacked, so more than two elements can be present. Will return the latest event. """ if h[0][0] == h[1][0]: same = [] for PN in zip(h, islice(h, 1, None)): if PN[0][0] != PN[1][0]: break # Prev and Next's clocks differ same.append(PN[0]) # return first item sorted by process id return sorted(same, key=lambda event: event[1])[0] # clock values unique, return first item return h[0] def __str__(self): return str(self.value) def __repr__(self): return '<LamportClock: {0.value}>'.format(self) kombu-4.1.0/kombu/five.py0000644000175000017500000000026113130603207015171 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Python 2/3 Compatibility.""" from __future__ import absolute_import, unicode_literals import sys import vine.five sys.modules[__name__] = vine.five kombu-4.1.0/kombu/__init__.py0000644000175000017500000001031113134154022015774 0ustar omeromer00000000000000"""Messaging library for Python.""" from __future__ import absolute_import, unicode_literals import os import re import sys if sys.version_info < (2, 7): # pragma: no cover raise Exception('Kombu 4.0 requires Python versions 2.7 or later.') from collections import namedtuple # noqa __version__ = '4.1.0' __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' __homepage__ = 'https://kombu.readthedocs.io' __docformat__ = 'restructuredtext en' # -eof meta- version_info_t = namedtuple('version_info_t', ( 'major', 'minor', 'micro', 'releaselevel', 'serial', )) # bumpversion can only search for {current_version} # so we have to parse the version here. _temp = re.match( r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups() VERSION = version_info = version_info_t( int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '') del(_temp) del(re) STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False if STATICA_HACK: # pragma: no cover # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. from kombu.connection import Connection, BrokerConnection # noqa from kombu.entity import Exchange, Queue, binding # noqa from kombu.message import Message # noqa from kombu.messaging import Consumer, Producer # noqa from kombu.pools import connections, producers # noqa from kombu.utils.url import parse_url # noqa from kombu.common import eventloop, uuid # noqa from kombu.serialization import ( # noqa enable_insecure_serializers, disable_insecure_serializers, ) # Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this. from types import ModuleType # noqa all_by_module = { 'kombu.connection': ['Connection', 'BrokerConnection'], 'kombu.entity': ['Exchange', 'Queue', 'binding'], 'kombu.message': ['Message'], 'kombu.messaging': ['Consumer', 'Producer'], 'kombu.pools': ['connections', 'producers'], 'kombu.utils.url': ['parse_url'], 'kombu.common': ['eventloop', 'uuid'], 'kombu.serialization': [ 'enable_insecure_serializers', 'disable_insecure_serializers', ], } object_origins = {} for module, items in all_by_module.items(): for item in items: object_origins[item] = module class module(ModuleType): """Customized Python module.""" def __getattr__(self, name): if name in object_origins: module = __import__(object_origins[name], None, None, [name]) for extra_name in all_by_module[module.__name__]: setattr(self, extra_name, getattr(module, extra_name)) return getattr(module, name) return ModuleType.__getattribute__(self, name) def __dir__(self): result = list(new_module.__all__) result.extend(('__file__', '__path__', '__doc__', '__all__', '__docformat__', '__name__', '__path__', 'VERSION', '__package__', '__version__', '__author__', '__contact__', '__homepage__', '__docformat__')) return result # 2.5 does not define __package__ try: package = __package__ except NameError: # pragma: no cover package = 'kombu' # keep a reference to this module so that it's not garbage collected old_module = sys.modules[__name__] new_module = sys.modules[__name__] = module(__name__) new_module.__dict__.update({ '__file__': __file__, '__path__': __path__, '__doc__': __doc__, '__all__': tuple(object_origins), '__version__': __version__, '__author__': __author__, '__contact__': __contact__, '__homepage__': __homepage__, '__docformat__': __docformat__, '__package__': package, 'version_info_t': version_info_t, 'version_info': version_info, 'VERSION': VERSION, 'absolute_import': absolute_import, 'unicode_literals': unicode_literals, }) if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1') from .utils import debug debug.setup_logging() kombu-4.1.0/kombu/compat.py0000644000175000017500000001464413130603207015535 0ustar omeromer00000000000000"""Carrot compatibility interface. See https://pypi.python.org/pypi/carrot for documentation. """ from __future__ import absolute_import, unicode_literals from itertools import count from . 
import messaging from .entity import Exchange, Queue from .five import items __all__ = ['Publisher', 'Consumer'] # XXX compat attribute entry_to_queue = Queue.from_dict def _iterconsume(connection, consumer, no_ack=False, limit=None): consumer.consume(no_ack=no_ack) for iteration in count(0): # for infinity if limit and iteration >= limit: break yield connection.drain_events() class Publisher(messaging.Producer): """Carrot compatible producer.""" exchange = '' exchange_type = 'direct' routing_key = '' durable = True auto_delete = False _closed = False def __init__(self, connection, exchange=None, routing_key=None, exchange_type=None, durable=None, auto_delete=None, channel=None, **kwargs): if channel: connection = channel self.exchange = exchange or self.exchange self.exchange_type = exchange_type or self.exchange_type self.routing_key = routing_key or self.routing_key if auto_delete is not None: self.auto_delete = auto_delete if durable is not None: self.durable = durable if not isinstance(self.exchange, Exchange): self.exchange = Exchange(name=self.exchange, type=self.exchange_type, routing_key=self.routing_key, auto_delete=self.auto_delete, durable=self.durable) super(Publisher, self).__init__(connection, self.exchange, **kwargs) def send(self, *args, **kwargs): return self.publish(*args, **kwargs) def close(self): super(Publisher, self).close() self._closed = True def __enter__(self): return self def __exit__(self, *exc_info): self.close() @property def backend(self): return self.channel class Consumer(messaging.Consumer): """Carrot compatible consumer.""" queue = '' exchange = '' routing_key = '' exchange_type = 'direct' durable = True exclusive = False auto_delete = False _closed = False def __init__(self, connection, queue=None, exchange=None, routing_key=None, exchange_type=None, durable=None, exclusive=None, auto_delete=None, **kwargs): self.backend = connection.channel() if durable is not None: self.durable = durable if exclusive is not None: self.exclusive = exclusive if auto_delete is not None: self.auto_delete = auto_delete self.queue = queue or self.queue self.exchange = exchange or self.exchange self.exchange_type = exchange_type or self.exchange_type self.routing_key = routing_key or self.routing_key exchange = Exchange(self.exchange, type=self.exchange_type, routing_key=self.routing_key, auto_delete=self.auto_delete, durable=self.durable) queue = Queue(self.queue, exchange=exchange, routing_key=self.routing_key, durable=self.durable, exclusive=self.exclusive, auto_delete=self.auto_delete) super(Consumer, self).__init__(self.backend, queue, **kwargs) def revive(self, channel): self.backend = channel super(Consumer, self).revive(channel) def close(self): self.cancel() self.backend.close() self._closed = True def __enter__(self): return self def __exit__(self, *exc_info): self.close() def __iter__(self): return self.iterqueue(infinite=True) def fetch(self, no_ack=None, enable_callbacks=False): if no_ack is None: no_ack = self.no_ack message = self.queues[0].get(no_ack) if message: if enable_callbacks: self.receive(message.payload, message) return message def process_next(self): raise NotImplementedError('Use fetch(enable_callbacks=True)') def discard_all(self, filterfunc=None): if filterfunc is not None: raise NotImplementedError( 'discard_all does not implement filters') return self.purge() def iterconsume(self, limit=None, no_ack=None): return _iterconsume(self.connection, self, no_ack, limit) def wait(self, limit=None): it = self.iterconsume(limit)
return list(it) def iterqueue(self, limit=None, infinite=False): for items_since_start in count(): # for infinity item = self.fetch() if (not infinite and item is None) or \ (limit and items_since_start >= limit): break yield item class ConsumerSet(messaging.Consumer): def __init__(self, connection, from_dict=None, consumers=None, channel=None, **kwargs): if channel: self._provided_channel = True self.backend = channel else: self._provided_channel = False self.backend = connection.channel() queues = [] if consumers: for consumer in consumers: queues.extend(consumer.queues) if from_dict: for queue_name, queue_options in items(from_dict): queues.append(Queue.from_dict(queue_name, **queue_options)) super(ConsumerSet, self).__init__(self.backend, queues, **kwargs) def iterconsume(self, limit=None, no_ack=False): return _iterconsume(self.connection, self, no_ack, limit) def discard_all(self): return self.purge() def add_consumer_from_dict(self, queue, **options): return self.add_queue(Queue.from_dict(queue, **options)) def add_consumer(self, consumer): for queue in consumer.queues: self.add_queue(queue) def revive(self, channel): self.backend = channel super(ConsumerSet, self).revive(channel) def close(self): self.cancel() if not self._provided_channel: self.channel.close() kombu-4.1.0/kombu/transport/0000755000175000017500000000000013134154263015732 5ustar omeromer00000000000000kombu-4.1.0/kombu/transport/base.py0000644000175000017500000001624313130603207017215 0ustar omeromer00000000000000"""Base transport interface.""" from __future__ import absolute_import, unicode_literals import errno import socket from amqp.exceptions import RecoverableConnectionError from kombu.exceptions import ChannelError, ConnectionError from kombu.five import items from kombu.message import Message from kombu.utils.functional import dictfilter from kombu.utils.objects import cached_property from kombu.utils.time import maybe_s_to_ms __all__ = ['Message', 'StdChannel', 'Management', 'Transport'] RABBITMQ_QUEUE_ARGUMENTS = { # type: Mapping[str, Tuple[str, Callable]] 'expires': ('x-expires', maybe_s_to_ms), 'message_ttl': ('x-message-ttl', maybe_s_to_ms), 'max_length': ('x-max-length', int), 'max_length_bytes': ('x-max-length-bytes', int), 'max_priority': ('x-max-priority', int), } def to_rabbitmq_queue_arguments(arguments, **options): # type: (Mapping, **Any) -> Dict """Convert queue arguments to RabbitMQ queue arguments. This is the implementation for Channel.prepare_queue_arguments for AMQP-based transports. It's used by both the pyamqp and librabbitmq transports. Arguments: arguments (Mapping): User-supplied arguments (``Queue.queue_arguments``). Keyword Arguments: expires (float): Queue expiry time in seconds. This will be converted to ``x-expires`` in int milliseconds. message_ttl (float): Message TTL in seconds. This will be converted to ``x-message-ttl`` in int milliseconds. max_length (int): Max queue length (in number of messages). This will be converted to ``x-max-length`` int. max_length_bytes (int): Max queue size in bytes. This will be converted to ``x-max-length-bytes`` int. max_priority (int): Max priority steps for queue. This will be converted to ``x-max-priority`` int. Returns: Dict: RabbitMQ compatible queue arguments. 
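Example:
    A small sketch of the conversion (illustrative values; options
    given in seconds become integer milliseconds under their
    ``x-`` prefixed keys, and the ordering of the result dict is
    not significant)::

        >>> to_rabbitmq_queue_arguments({}, expires=10.0, max_length=10)
        {'x-expires': 10000, 'x-max-length': 10}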
""" prepared = dictfilter(dict( _to_rabbitmq_queue_argument(key, value) for key, value in items(options) )) return dict(arguments, **prepared) if prepared else arguments def _to_rabbitmq_queue_argument(key, value): # type: (str, Any) -> Tuple[str, Any] opt, typ = RABBITMQ_QUEUE_ARGUMENTS[key] return opt, typ(value) if value is not None else value def _LeftBlank(obj, method): return NotImplementedError( 'Transport {0.__module__}.{0.__name__} does not implement {1}'.format( obj.__class__, method)) class StdChannel(object): """Standard channel base class.""" no_ack_consumers = None def Consumer(self, *args, **kwargs): from kombu.messaging import Consumer return Consumer(self, *args, **kwargs) def Producer(self, *args, **kwargs): from kombu.messaging import Producer return Producer(self, *args, **kwargs) def get_bindings(self): raise _LeftBlank(self, 'get_bindings') def after_reply_message_received(self, queue): """Callback called after RPC reply received. Notes: Reply queue semantics: can be used to delete the queue after transient reply message received. """ pass def prepare_queue_arguments(self, arguments, **kwargs): return arguments def __enter__(self): return self def __exit__(self, *exc_info): self.close() class Management(object): """AMQP Management API (incomplete).""" def __init__(self, transport): self.transport = transport def get_bindings(self): raise _LeftBlank(self, 'get_bindings') class Implements(dict): """Helper class used to define transport features.""" def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def extend(self, **kwargs): return self.__class__(self, **kwargs) default_transport_capabilities = Implements( async=False, exchange_type=frozenset(['direct', 'topic', 'fanout', 'headers']), heartbeats=False, ) class Transport(object): """Base class for transports.""" Management = Management #: The :class:`~kombu.Connection` owning this instance. client = None #: Set to True if :class:`~kombu.Connection` should pass the URL #: unmodified. can_parse_url = False #: Default port used when no port has been specified. default_port = None #: Tuple of errors that can happen due to connection failure. connection_errors = (ConnectionError,) #: Tuple of errors that can happen due to channel/method failure. channel_errors = (ChannelError,) #: Type of driver, can be used to separate transports #: using the AMQP protocol (driver_type: 'amqp'), #: Redis (driver_type: 'redis'), etc... driver_type = 'N/A' #: Name of driver library (e.g. 'py-amqp', 'redis'). 
driver_name = 'N/A' __reader = None implements = default_transport_capabilities.extend() def __init__(self, client, **kwargs): self.client = client def establish_connection(self): raise _LeftBlank(self, 'establish_connection') def close_connection(self, connection): raise _LeftBlank(self, 'close_connection') def create_channel(self, connection): raise _LeftBlank(self, 'create_channel') def close_channel(self, connection): raise _LeftBlank(self, 'close_channel') def drain_events(self, connection, **kwargs): raise _LeftBlank(self, 'drain_events') def heartbeat_check(self, connection, rate=2): pass def driver_version(self): return 'N/A' def get_heartbeat_interval(self, connection): return 0 def register_with_event_loop(self, connection, loop): pass def unregister_from_event_loop(self, connection, loop): pass def verify_connection(self, connection): return True def _make_reader(self, connection, timeout=socket.timeout, error=socket.error, _unavail=(errno.EAGAIN, errno.EINTR)): drain_events = connection.drain_events def _read(loop): if not connection.connected: raise RecoverableConnectionError('Socket was disconnected') try: drain_events(timeout=0) except timeout: return except error as exc: if exc.errno in _unavail: return raise loop.call_soon(_read, loop) return _read def qos_semantics_matches_spec(self, connection): return True def on_readable(self, connection, loop): reader = self.__reader if reader is None: reader = self.__reader = self._make_reader(connection) reader(loop) @property def default_connection_params(self): return {} def get_manager(self, *args, **kwargs): return self.Management(self) @cached_property def manager(self): return self.get_manager() @property def supports_heartbeats(self): return self.implements.heartbeats @property def supports_ev(self): return self.implements.async kombu-4.1.0/kombu/transport/filesystem.py0000644000175000017500000001313413130603207020463 0ustar omeromer00000000000000"""File-system Transport. Transport using the file-system as the message store. """ from __future__ import absolute_import, unicode_literals import os import shutil import uuid import tempfile from . 
import virtual from kombu.exceptions import ChannelError from kombu.five import Empty, monotonic from kombu.utils.encoding import bytes_to_str, str_to_bytes from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property VERSION = (1, 0, 0) __version__ = '.'.join(map(str, VERSION)) # needs win32all to work on Windows if os.name == 'nt': import win32con import win32file import pywintypes LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK # 0 is the default LOCK_SH = 0 # noqa LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # noqa __overlapped = pywintypes.OVERLAPPED() def lock(file, flags): """Create file lock.""" hfile = win32file._get_osfhandle(file.fileno()) win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped) def unlock(file): """Remove file lock.""" hfile = win32file._get_osfhandle(file.fileno()) win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped) elif os.name == 'posix': import fcntl from fcntl import LOCK_EX, LOCK_SH, LOCK_NB # noqa def lock(file, flags): # noqa """Create file lock.""" fcntl.flock(file.fileno(), flags) def unlock(file): # noqa """Remove file lock.""" fcntl.flock(file.fileno(), fcntl.LOCK_UN) else: raise RuntimeError( 'Filesystem plugin only defined for NT and POSIX platforms') class Channel(virtual.Channel): """Filesystem Channel.""" def _put(self, queue, payload, **kwargs): """Put `message` onto `queue`.""" filename = '%s_%s.%s.msg' % (int(round(monotonic() * 1000)), uuid.uuid4(), queue) filename = os.path.join(self.data_folder_out, filename) f = open(filename, 'wb') try: lock(f, LOCK_EX) f.write(str_to_bytes(dumps(payload))) except (IOError, OSError): raise ChannelError( 'Cannot add file {0!r} to directory'.format(filename)) finally: unlock(f) f.close() def _get(self, queue): """Get next message from `queue`.""" queue_find = '.' + queue + '.msg' folder = os.listdir(self.data_folder_in) folder = sorted(folder) while len(folder) > 0: filename = folder.pop(0) # only handle message for the requested queue if filename.find(queue_find) < 0: continue if self.store_processed: processed_folder = self.processed_folder else: processed_folder = tempfile.gettempdir() try: # move the file to the tmp/processed folder shutil.move(os.path.join(self.data_folder_in, filename), processed_folder) except IOError: pass # file could be locked, or removed in meantime so ignore filename = os.path.join(processed_folder, filename) try: f = open(filename, 'rb') payload = f.read() f.close() if not self.store_processed: os.remove(filename) except (IOError, OSError): raise ChannelError( 'Cannot read file {0!r} from queue.'.format(filename)) return loads(bytes_to_str(payload)) raise Empty() def _purge(self, queue): """Remove all messages from `queue`.""" count = 0 queue_find = '.'
+ queue + '.msg' folder = os.listdir(self.data_folder_in) while len(folder) > 0: filename = folder.pop() try: # only purge messages for the requested queue if filename.find(queue_find) < 0: continue filename = os.path.join(self.data_folder_in, filename) os.remove(filename) count += 1 except OSError: # we simply ignore its existence, as it was probably # processed by another worker pass return count def _size(self, queue): """Return the number of messages in `queue` as an :class:`int`.""" count = 0 queue_find = '.{0}.msg'.format(queue) folder = os.listdir(self.data_folder_in) while len(folder) > 0: filename = folder.pop() # only handle message for the requested queue if filename.find(queue_find) < 0: continue count += 1 return count @property def transport_options(self): return self.connection.client.transport_options @cached_property def data_folder_in(self): return self.transport_options.get('data_folder_in', 'data_in') @cached_property def data_folder_out(self): return self.transport_options.get('data_folder_out', 'data_out') @cached_property def store_processed(self): return self.transport_options.get('store_processed', False) @cached_property def processed_folder(self): return self.transport_options.get('processed_folder', 'processed') class Transport(virtual.Transport): """Filesystem Transport.""" Channel = Channel default_port = 0 driver_type = 'filesystem' driver_name = 'filesystem' def driver_version(self): return 'N/A' kombu-4.1.0/kombu/transport/sqlalchemy/0000755000175000017500000000000013134154263020074 5ustar omeromer00000000000000kombu-4.1.0/kombu/transport/sqlalchemy/__init__.py0000644000175000017500000001222413130603207022177 0ustar omeromer00000000000000"""Kombu transport using SQLAlchemy as the message store.""" # SQLAlchemy overrides != False to have special meaning and pep8 complains # flake8: noqa from __future__ import absolute_import, unicode_literals from json import loads, dumps from sqlalchemy import create_engine from sqlalchemy.exc import OperationalError from sqlalchemy.orm import sessionmaker from kombu.five import Empty from kombu.transport import virtual from kombu.utils import cached_property from kombu.utils.encoding import bytes_to_str from .models import (ModelBase, Queue as QueueBase, Message as MessageBase, class_registry, metadata) VERSION = (1, 1, 0) __version__ = '.'.join(map(str, VERSION)) class Channel(virtual.Channel): """The channel class.""" _session = None _engines = {} # engine cache def __init__(self, connection, **kwargs): self._configure_entity_tablenames(connection.client.transport_options) super(Channel, self).__init__(connection, **kwargs) def _configure_entity_tablenames(self, opts): self.queue_tablename = opts.get('queue_tablename', 'kombu_queue') self.message_tablename = opts.get('message_tablename', 'kombu_message') # # Define the model definitions. This registers the declarative # classes with the active SQLAlchemy metadata object. This *must* be # done prior to the ``create_engine`` call. 
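# (Illustrative aside, not from the original module: the table names
# configured above come from ``transport_options``; for example, a
# hypothetical connection could use::
#
#     Connection('sqla+sqlite:///:memory:', transport_options={
#         'queue_tablename': 'my_queues',
#         'message_tablename': 'my_messages',
#     })
#
# where the URL scheme shown is an assumed example.)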
# self.queue_cls and self.message_cls def _engine_from_config(self): conninfo = self.connection.client transport_options = conninfo.transport_options.copy() transport_options.pop('queue_tablename', None) transport_options.pop('message_tablename', None) return create_engine(conninfo.hostname, **transport_options) def _open(self): conninfo = self.connection.client if conninfo.hostname not in self._engines: engine = self._engine_from_config() Session = sessionmaker(bind=engine) metadata.create_all(engine) self._engines[conninfo.hostname] = engine, Session return self._engines[conninfo.hostname] @property def session(self): if self._session is None: _, Session = self._open() self._session = Session() return self._session def _get_or_create(self, queue): obj = self.session.query(self.queue_cls) \ .filter(self.queue_cls.name == queue).first() if not obj: obj = self.queue_cls(queue) self.session.add(obj) try: self.session.commit() except OperationalError: self.session.rollback() return obj def _new_queue(self, queue, **kwargs): self._get_or_create(queue) def _put(self, queue, payload, **kwargs): obj = self._get_or_create(queue) message = self.message_cls(dumps(payload), obj) self.session.add(message) try: self.session.commit() except OperationalError: self.session.rollback() def _get(self, queue): obj = self._get_or_create(queue) if self.session.bind.name == 'sqlite': self.session.execute('BEGIN IMMEDIATE TRANSACTION') try: msg = self.session.query(self.message_cls) \ .with_lockmode('update') \ .filter(self.message_cls.queue_id == obj.id) \ .filter(self.message_cls.visible != False) \ .order_by(self.message_cls.sent_at) \ .order_by(self.message_cls.id) \ .limit(1) \ .first() if msg: msg.visible = False return loads(bytes_to_str(msg.payload)) raise Empty() finally: self.session.commit() def _query_all(self, queue): obj = self._get_or_create(queue) return self.session.query(self.message_cls) \ .filter(self.message_cls.queue_id == obj.id) def _purge(self, queue): count = self._query_all(queue).delete(synchronize_session=False) try: self.session.commit() except OperationalError: self.session.rollback() return count def _size(self, queue): return self._query_all(queue).count() def _declarative_cls(self, name, base, ns): if name in class_registry: return class_registry[name] return type(str(name), (base, ModelBase), ns) @cached_property def queue_cls(self): return self._declarative_cls( 'Queue', QueueBase, {'__tablename__': self.queue_tablename} ) @cached_property def message_cls(self): return self._declarative_cls( 'Message', MessageBase, {'__tablename__': self.message_tablename} ) class Transport(virtual.Transport): """The transport class.""" Channel = Channel can_parse_url = True default_port = 0 driver_type = 'sql' driver_name = 'sqlalchemy' connection_errors = (OperationalError, ) def driver_version(self): import sqlalchemy return sqlalchemy.__version__ kombu-4.1.0/kombu/transport/sqlalchemy/models.py0000644000175000017500000000406213130603207021724 0ustar omeromer00000000000000"""Kombu transport using SQLAlchemy as the message store.""" from __future__ import absolute_import, unicode_literals import datetime from sqlalchemy import (Column, Integer, String, Text, DateTime, Sequence, Boolean, ForeignKey, SmallInteger) from sqlalchemy.ext.declarative import declarative_base, declared_attr from sqlalchemy.orm import relation from sqlalchemy.schema import MetaData class_registry = {} metadata = MetaData() ModelBase = declarative_base(metadata=metadata, class_registry=class_registry) class 
Queue(object): """The queue class.""" __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'} id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True, autoincrement=True) name = Column(String(200), unique=True) def __init__(self, name): self.name = name def __str__(self): return '<Queue({self.name})>'.format(self=self) @declared_attr def messages(cls): return relation('Message', backref='queue', lazy='noload') class Message(object): """The message class.""" __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'} id = Column(Integer, Sequence('message_id_sequence'), primary_key=True, autoincrement=True) visible = Column(Boolean, default=True, index=True) sent_at = Column('timestamp', DateTime, nullable=True, index=True, onupdate=datetime.datetime.now) payload = Column(Text, nullable=False) version = Column(SmallInteger, nullable=False, default=1) __mapper_args__ = {'version_id_col': version} def __init__(self, payload, queue): self.payload = payload self.queue = queue def __str__(self): return '<Message: {0.sent_at} {0.payload} {0.queue_id}>'.format(self) @declared_attr def queue_id(self): return Column( Integer, ForeignKey( '%s.id' % class_registry['Queue'].__tablename__, name='FK_kombu_message_queue' ) ) kombu-4.1.0/kombu/transport/virtual/0000755000175000017500000000000013134154263017420 5ustar omeromer00000000000000kombu-4.1.0/kombu/transport/virtual/base.py0000644000175000017500000010100113130603207020676 0ustar omeromer00000000000000"""Virtual transport implementation. Emulates the AMQ API for non-AMQ transports. """ from __future__ import absolute_import, print_function, unicode_literals import base64 import socket import sys import warnings from array import array from collections import OrderedDict, defaultdict, namedtuple from itertools import count from multiprocessing.util import Finalize from time import sleep from amqp.protocol import queue_declare_ok_t from kombu.exceptions import ResourceError, ChannelError from kombu.five import Empty, items, monotonic from kombu.log import get_logger from kombu.utils.encoding import str_to_bytes, bytes_to_str from kombu.utils.div import emergency_dump_state from kombu.utils.scheduling import FairCycle from kombu.utils.uuid import uuid from kombu.transport import base from .exchange import STANDARD_EXCHANGE_TYPES ARRAY_TYPE_H = 'H' if sys.version_info[0] == 3 else b'H' UNDELIVERABLE_FMT = """\ Message could not be delivered: No queues bound to exchange {exchange!r} \ using binding key {routing_key!r}. """ NOT_EQUIVALENT_FMT = """\ Cannot redeclare exchange {0!r} in vhost {1!r} with \ different type, durable, autodelete or arguments value.\ """ W_NO_CONSUMERS = """\ Requeuing undeliverable message for queue %r: No consumers.\ """ RESTORING_FMT = 'Restoring {0!r} unacknowledged message(s)' RESTORE_PANIC_FMT = 'UNABLE TO RESTORE {0} MESSAGES: {1}' logger = get_logger(__name__) #: Key format used for queue argument lookups in BrokerState.bindings. binding_key_t = namedtuple('binding_key_t', ( 'queue', 'exchange', 'routing_key', )) #: BrokerState.queue_bindings generates tuples in this format.
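#: (For illustration, a generated value could look like
#: ``queue_binding_t('amq.direct', 'video', {})``; an example value,
#: not module code.)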
queue_binding_t = namedtuple('queue_binding_t', ( 'exchange', 'routing_key', 'arguments', )) class Base64(object): """Base64 codec.""" def encode(self, s): return bytes_to_str(base64.b64encode(str_to_bytes(s))) def decode(self, s): return base64.b64decode(str_to_bytes(s)) class NotEquivalentError(Exception): """Entity declaration is not equivalent to the previous declaration.""" class UndeliverableWarning(UserWarning): """The message could not be delivered to a queue.""" class BrokerState(object): """Broker state holds exchanges, queues and bindings.""" #: Mapping of exchange name to #: :class:`kombu.transport.virtual.exchange.ExchangeType` exchanges = None #: This is the actual bindings registry, used to store bindings and to #: test 'in' relationships in constant time. It has the following #: structure:: #: #: { #: (queue, exchange, routing_key): arguments, #: # ..., #: } bindings = None #: The queue index is used to access directly (constant time) #: all the bindings of a certain queue. It has the following structure:: #: { #: queue: { #: (queue, exchange, routing_key), #: # ..., #: }, #: # ..., #: } queue_index = None def __init__(self, exchanges=None): self.exchanges = {} if exchanges is None else exchanges self.bindings = {} self.queue_index = defaultdict(set) def clear(self): self.exchanges.clear() self.bindings.clear() self.queue_index.clear() def has_binding(self, queue, exchange, routing_key): return (queue, exchange, routing_key) in self.bindings def binding_declare(self, queue, exchange, routing_key, arguments): key = binding_key_t(queue, exchange, routing_key) self.bindings.setdefault(key, arguments) self.queue_index[queue].add(key) def binding_delete(self, queue, exchange, routing_key): key = binding_key_t(queue, exchange, routing_key) try: del self.bindings[key] except KeyError: pass else: self.queue_index[queue].remove(key) def queue_bindings_delete(self, queue): try: bindings = self.queue_index.pop(queue) except KeyError: pass else: [self.bindings.pop(binding, None) for binding in bindings] def queue_bindings(self, queue): return ( queue_binding_t(key.exchange, key.routing_key, self.bindings[key]) for key in self.queue_index[queue] ) class QoS(object): """Quality of Service guarantees. Only supports `prefetch_count` at this point. Arguments: channel (ChannelT): Connection channel. prefetch_count (int): Initial prefetch count (defaults to 0). """ #: current prefetch count value prefetch_count = 0 #: :class:`~collections.OrderedDict` of active messages. #: *NOTE*: Can only be modified by the consuming thread. _delivered = None #: acks can be done by other threads than the consuming thread. #: Instead of a mutex, which doesn't perform well here, we mark #: the delivery tags as dirty, so subsequent calls to append() can remove #: them. _dirty = None #: If disabled, unacked messages won't be restored at shutdown. restore_at_shutdown = True def __init__(self, channel, prefetch_count=0): self.channel = channel self.prefetch_count = prefetch_count or 0 self._delivered = OrderedDict() self._delivered.restored = False self._dirty = set() self._quick_ack = self._dirty.add self._quick_append = self._delivered.__setitem__ self._on_collect = Finalize( self, self.restore_unacked_once, exitpriority=1, ) def can_consume(self): """Return true if the channel can be consumed from. Used to ensure the client adheres to currently active prefetch limits.
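Example:
    A minimal sketch of a polling loop guarded by this flag
    (``channel`` is any virtual channel instance and
    ``fetch_message`` is a hypothetical callable, not kombu API)::

        while channel.qos.can_consume():
            fetch_message()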
""" pcount = self.prefetch_count return not pcount or len(self._delivered) - len(self._dirty) < pcount def can_consume_max_estimate(self): """Return the maximum number of messages allowed to be returned. Returns an estimated number of messages that a consumer may be allowed to consume at once from the broker. This is used for services where bulk 'get message' calls are preferred to many individual 'get message' calls - like SQS. Returns: int: greater than zero. """ pcount = self.prefetch_count if pcount: return max(pcount - (len(self._delivered) - len(self._dirty)), 0) def append(self, message, delivery_tag): """Append message to transactional state.""" if self._dirty: self._flush() self._quick_append(delivery_tag, message) def get(self, delivery_tag): return self._delivered[delivery_tag] def _flush(self): """Flush dirty (acked/rejected) tags from.""" dirty = self._dirty delivered = self._delivered while 1: try: dirty_tag = dirty.pop() except KeyError: break delivered.pop(dirty_tag, None) def ack(self, delivery_tag): """Acknowledge message and remove from transactional state.""" self._quick_ack(delivery_tag) def reject(self, delivery_tag, requeue=False): """Remove from transactional state and requeue message.""" if requeue: self.channel._restore_at_beginning(self._delivered[delivery_tag]) self._quick_ack(delivery_tag) def restore_unacked(self): """Restore all unacknowledged messages.""" self._flush() delivered = self._delivered errors = [] restore = self.channel._restore pop_message = delivered.popitem while delivered: try: _, message = pop_message() except KeyError: # pragma: no cover break try: restore(message) except BaseException as exc: errors.append((exc, message)) delivered.clear() return errors def restore_unacked_once(self, stderr=None): """Restore all unacknowledged messages at shutdown/gc collect. Note: Can only be called once for each instance, subsequent calls will be ignored. """ self._on_collect.cancel() self._flush() stderr = sys.stderr if stderr is None else stderr state = self._delivered if not self.restore_at_shutdown or not self.channel.do_restore: return if getattr(state, 'restored', None): assert not state return try: if state: print(RESTORING_FMT.format(len(self._delivered)), file=stderr) unrestored = self.restore_unacked() if unrestored: errors, messages = list(zip(*unrestored)) print(RESTORE_PANIC_FMT.format(len(errors), errors), file=stderr) emergency_dump_state(messages, stderr=stderr) finally: state.restored = True def restore_visible(self, *args, **kwargs): """Restore any pending unackwnowledged messages. To be filled in for visibility_timeout style implementations. Note: This is implementation optional, and currently only used by the Redis transport. 
""" pass class Message(base.Message): """Message object.""" def __init__(self, payload, channel=None, **kwargs): self._raw = payload properties = payload['properties'] body = payload.get('body') if body: body = channel.decode_body(body, properties.get('body_encoding')) super(Message, self).__init__( body=body, channel=channel, delivery_tag=properties['delivery_tag'], content_type=payload.get('content-type'), content_encoding=payload.get('content-encoding'), headers=payload.get('headers'), properties=properties, delivery_info=properties.get('delivery_info'), postencode='utf-8', **kwargs) def serializable(self): props = self.properties body, _ = self.channel.encode_body(self.body, props.get('body_encoding')) headers = dict(self.headers) # remove compression header headers.pop('compression', None) return { 'body': body, 'properties': props, 'content-type': self.content_type, 'content-encoding': self.content_encoding, 'headers': headers, } class AbstractChannel(object): """Abstract channel interface. This is an abstract class defining the channel methods you'd usually want to implement in a virtual channel. Note: Do not subclass directly, but rather inherit from :class:`Channel`. """ def _get(self, queue, timeout=None): """Get next message from `queue`.""" raise NotImplementedError('Virtual channels must implement _get') def _put(self, queue, message): """Put `message` onto `queue`.""" raise NotImplementedError('Virtual channels must implement _put') def _purge(self, queue): """Remove all messages from `queue`.""" raise NotImplementedError('Virtual channels must implement _purge') def _size(self, queue): """Return the number of messages in `queue` as an :class:`int`.""" return 0 def _delete(self, queue, *args, **kwargs): """Delete `queue`. Note: This just purges the queue, if you need to do more you can override this method. """ self._purge(queue) def _new_queue(self, queue, **kwargs): """Create new queue. Note: Your transport can override this method if it needs to do something whenever a new queue is declared. """ pass def _has_queue(self, queue, **kwargs): """Verify that queue exists. Returns: bool: Should return :const:`True` if the queue exists or :const:`False` otherwise. """ return True def _poll(self, cycle, callback, timeout=None): """Poll a list of queues for available messages.""" return cycle.get(callback) def _get_and_deliver(self, queue, callback): message = self._get(queue) callback(message, queue) class Channel(AbstractChannel, base.StdChannel): """Virtual channel. Arguments: connection (ConnectionT): The transport instance this channel is part of. """ #: message class used. Message = Message #: QoS class used. QoS = QoS #: flag to restore unacked messages when channel #: goes out of scope. do_restore = True #: mapping of exchange types and corresponding classes. exchange_types = dict(STANDARD_EXCHANGE_TYPES) #: flag set if the channel supports fanout exchanges. supports_fanout = False #: Binary <-> ASCII codecs. codecs = {'base64': Base64()} #: Default body encoding. #: NOTE: ``transport_options['body_encoding']`` will override this value. body_encoding = 'base64' #: counter used to generate delivery tags for this channel. _delivery_tags = count(1) #: Optional queue where messages with no route is delivered. #: Set by ``transport_options['deadletter_queue']``. deadletter_queue = None # List of options to transfer from :attr:`transport_options`. 
from_transport_options = ('body_encoding', 'deadletter_queue') # Priority defaults default_priority = 0 min_priority = 0 max_priority = 9 def __init__(self, connection, **kwargs): self.connection = connection self._consumers = set() self._cycle = None self._tag_to_queue = {} self._active_queues = [] self._qos = None self.closed = False # instantiate exchange types self.exchange_types = dict( (typ, cls(self)) for typ, cls in items(self.exchange_types) ) try: self.channel_id = self.connection._avail_channel_ids.pop() except IndexError: raise ResourceError( 'No free channel ids, current={0}, channel_max={1}'.format( len(self.connection.channels), self.connection.channel_max), (20, 10), ) topts = self.connection.client.transport_options for opt_name in self.from_transport_options: try: setattr(self, opt_name, topts[opt_name]) except KeyError: pass def exchange_declare(self, exchange=None, type='direct', durable=False, auto_delete=False, arguments=None, nowait=False, passive=False): """Declare exchange.""" type = type or 'direct' exchange = exchange or 'amq.%s' % type if passive: if exchange not in self.state.exchanges: raise ChannelError( 'NOT_FOUND - no exchange {0!r} in vhost {1!r}'.format( exchange, self.connection.client.virtual_host or '/'), (50, 10), 'Channel.exchange_declare', '404', ) return try: prev = self.state.exchanges[exchange] if not self.typeof(exchange).equivalent(prev, exchange, type, durable, auto_delete, arguments): raise NotEquivalentError(NOT_EQUIVALENT_FMT.format( exchange, self.connection.client.virtual_host or '/')) except KeyError: self.state.exchanges[exchange] = { 'type': type, 'durable': durable, 'auto_delete': auto_delete, 'arguments': arguments or {}, 'table': [], } def exchange_delete(self, exchange, if_unused=False, nowait=False): """Delete `exchange` and all its bindings.""" for rkey, _, queue in self.get_table(exchange): self.queue_delete(queue, if_unused=True, if_empty=True) self.state.exchanges.pop(exchange, None) def queue_declare(self, queue=None, passive=False, **kwargs): """Declare queue.""" queue = queue or 'amq.gen-%s' % uuid() if passive and not self._has_queue(queue, **kwargs): raise ChannelError( 'NOT_FOUND - no queue {0!r} in vhost {1!r}'.format( queue, self.connection.client.virtual_host or '/'), (50, 10), 'Channel.queue_declare', '404', ) else: self._new_queue(queue, **kwargs) return queue_declare_ok_t(queue, self._size(queue), 0) def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): """Delete queue.""" if if_empty and self._size(queue): return for exchange, routing_key, args in self.state.queue_bindings(queue): meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, args, ) self._delete(queue, exchange, *meta, **kwargs) self.state.queue_bindings_delete(queue) def after_reply_message_received(self, queue): self.queue_delete(queue) def exchange_bind(self, destination, source='', routing_key='', nowait=False, arguments=None): raise NotImplementedError('transport does not support exchange_bind') def exchange_unbind(self, destination, source='', routing_key='', nowait=False, arguments=None): raise NotImplementedError('transport does not support exchange_unbind') def queue_bind(self, queue, exchange=None, routing_key='', arguments=None, **kwargs): """Bind `queue` to `exchange` with `routing key`.""" exchange = exchange or 'amq.direct' if self.state.has_binding(queue, exchange, routing_key): return # Add binding: self.state.binding_declare(queue, exchange, routing_key, arguments) # Update exchange's routing table: 
table = self.state.exchanges[exchange].setdefault('table', []) meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, arguments, ) table.append(meta) if self.supports_fanout: self._queue_bind(exchange, *meta) def queue_unbind(self, queue, exchange=None, routing_key='', arguments=None, **kwargs): # Remove queue binding: self.state.binding_delete(queue, exchange, routing_key) try: table = self.get_table(exchange) except KeyError: return binding_meta = self.typeof(exchange).prepare_bind( queue, exchange, routing_key, arguments, ) # TODO: the complexity of this operation is O(number of bindings). # Should be optimized. Modifying table in place. table[:] = [meta for meta in table if meta != binding_meta] def list_bindings(self): return ((queue, exchange, rkey) for exchange in self.state.exchanges for rkey, pattern, queue in self.get_table(exchange)) def queue_purge(self, queue, **kwargs): """Remove all ready messages from queue.""" return self._purge(queue) def _next_delivery_tag(self): return uuid() def basic_publish(self, message, exchange, routing_key, **kwargs): """Publish message.""" self._inplace_augment_message(message, exchange, routing_key) if exchange: return self.typeof(exchange).deliver( message, exchange, routing_key, **kwargs ) # anon exchange: routing_key is the destination queue return self._put(routing_key, message, **kwargs) def _inplace_augment_message(self, message, exchange, routing_key): message['body'], body_encoding = self.encode_body( message['body'], self.body_encoding, ) props = message['properties'] props.update( body_encoding=body_encoding, delivery_tag=self._next_delivery_tag(), ) props['delivery_info'].update( exchange=exchange, routing_key=routing_key, ) def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): """Consume from `queue`.""" self._tag_to_queue[consumer_tag] = queue self._active_queues.append(queue) def _callback(raw_message): message = self.Message(raw_message, channel=self) if not no_ack: self.qos.append(message, message.delivery_tag) return callback(message) self.connection._callbacks[queue] = _callback self._consumers.add(consumer_tag) self._reset_cycle() def basic_cancel(self, consumer_tag): """Cancel consumer by consumer tag.""" if consumer_tag in self._consumers: self._consumers.remove(consumer_tag) self._reset_cycle() queue = self._tag_to_queue.pop(consumer_tag, None) try: self._active_queues.remove(queue) except ValueError: pass self.connection._callbacks.pop(queue, None) def basic_get(self, queue, no_ack=False, **kwargs): """Get message by direct access (synchronous).""" try: message = self.Message(self._get(queue), channel=self) if not no_ack: self.qos.append(message, message.delivery_tag) return message except Empty: pass def basic_ack(self, delivery_tag, multiple=False): """Acknowledge message.""" self.qos.ack(delivery_tag) def basic_recover(self, requeue=False): """Recover unacked messages.""" if requeue: return self.qos.restore_unacked() raise NotImplementedError('Does not support recover(requeue=False)') def basic_reject(self, delivery_tag, requeue=False): """Reject message.""" self.qos.reject(delivery_tag, requeue=requeue) def basic_qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Change QoS settings for this channel. Note: Only `prefetch_count` is supported. 
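Example:
    Limiting unacknowledged deliveries to ten messages (a minimal
    sketch; the remaining arguments are accepted for AMQP
    compatibility but ignored here)::

        channel.basic_qos(prefetch_size=0, prefetch_count=10)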
""" self.qos.prefetch_count = prefetch_count def get_exchanges(self): return list(self.state.exchanges) def get_table(self, exchange): """Get table of bindings for `exchange`.""" return self.state.exchanges[exchange]['table'] def typeof(self, exchange, default='direct'): """Get the exchange type instance for `exchange`.""" try: type = self.state.exchanges[exchange]['type'] except KeyError: type = default return self.exchange_types[type] def _lookup(self, exchange, routing_key, default=None): """Find all queues matching `routing_key` for the given `exchange`. Returns: str: queue name -- must return the string `default` if no queues matched. """ if default is None: default = self.deadletter_queue if not exchange: # anon exchange return [routing_key or default] try: R = self.typeof(exchange).lookup( self.get_table(exchange), exchange, routing_key, default, ) except KeyError: R = [] if not R and default is not None: warnings.warn(UndeliverableWarning(UNDELIVERABLE_FMT.format( exchange=exchange, routing_key=routing_key)), ) self._new_queue(default) R = [default] return R def _restore(self, message): """Redeliver message to its original destination.""" delivery_info = message.delivery_info message = message.serializable() message['redelivered'] = True for queue in self._lookup( delivery_info['exchange'], delivery_info['routing_key']): self._put(queue, message) def _restore_at_beginning(self, message): return self._restore(message) def drain_events(self, timeout=None, callback=None): callback = callback or self.connection._deliver if self._consumers and self.qos.can_consume(): if hasattr(self, '_get_many'): return self._get_many(self._active_queues, timeout=timeout) return self._poll(self.cycle, callback, timeout=timeout) raise Empty() def message_to_python(self, raw_message): """Convert raw message to :class:`Message` instance.""" if not isinstance(raw_message, self.Message): return self.Message(payload=raw_message, channel=self) return raw_message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Prepare message data.""" properties = properties or {} properties.setdefault('delivery_info', {}) properties.setdefault('priority', priority or self.default_priority) return {'body': body, 'content-encoding': content_encoding, 'content-type': content_type, 'headers': headers or {}, 'properties': properties or {}} def flow(self, active=True): """Enable/disable message flow. Raises: NotImplementedError: as flow is not implemented by the base virtual implementation. """ raise NotImplementedError('virtual channels do not support flow.') def close(self): """Close channel. Cancel all consumers, and requeue unacked messages. 
""" if not self.closed: self.closed = True for consumer in list(self._consumers): self.basic_cancel(consumer) if self._qos: self._qos.restore_unacked_once() if self._cycle is not None: self._cycle.close() self._cycle = None if self.connection is not None: self.connection.close_channel(self) self.exchange_types = None def encode_body(self, body, encoding=None): if encoding: return self.codecs.get(encoding).encode(body), encoding return body, encoding def decode_body(self, body, encoding=None): if encoding: return self.codecs.get(encoding).decode(body) return body def _reset_cycle(self): self._cycle = FairCycle( self._get_and_deliver, self._active_queues, Empty) def __enter__(self): return self def __exit__(self, *exc_info): self.close() @property def state(self): """Broker state containing exchanges and bindings.""" return self.connection.state @property def qos(self): """:class:`QoS` manager for this channel.""" if self._qos is None: self._qos = self.QoS(self) return self._qos @property def cycle(self): if self._cycle is None: self._reset_cycle() return self._cycle def _get_message_priority(self, message, reverse=False): """Get priority from message. The value is limited to within a boundary of 0 to 9. Note: Higher value has more priority. """ try: priority = max( min(int(message['properties']['priority']), self.max_priority), self.min_priority, ) except (TypeError, ValueError, KeyError): priority = self.default_priority return (self.max_priority - priority) if reverse else priority class Management(base.Management): """Base class for the AMQP management API.""" def __init__(self, transport): super(Management, self).__init__(transport) self.channel = transport.client.channel() def get_bindings(self): return [dict(destination=q, source=e, routing_key=r) for q, e, r in self.channel.list_bindings()] def close(self): self.channel.close() class Transport(base.Transport): """Virtual transport. Arguments: client (kombu.Connection): The client this is a transport for. """ Channel = Channel Cycle = FairCycle Management = Management #: Global :class:`BrokerState` containing declared exchanges and bindings. state = BrokerState() #: :class:`~kombu.utils.scheduling.FairCycle` instance #: used to fairly drain events from channels (set by constructor). cycle = None #: port number used when no port is specified. default_port = None #: active channels. channels = None #: queue/callback map. _callbacks = None #: Time to sleep between unsuccessful polls. polling_interval = 1.0 #: Max number of channels channel_max = 65535 implements = base.Transport.implements.extend( async=False, exchange_type=frozenset(['direct', 'topic']), heartbeats=False, ) def __init__(self, client, **kwargs): self.client = client self.channels = [] self._avail_channels = [] self._callbacks = {} self.cycle = self.Cycle(self._drain_channel, self.channels, Empty) polling_interval = client.transport_options.get('polling_interval') if polling_interval is not None: self.polling_interval = polling_interval self._avail_channel_ids = array( ARRAY_TYPE_H, range(self.channel_max, 0, -1), ) def create_channel(self, connection): try: return self._avail_channels.pop() except IndexError: channel = self.Channel(connection) self.channels.append(channel) return channel def close_channel(self, channel): try: self._avail_channel_ids.append(channel.channel_id) try: self.channels.remove(channel) except ValueError: pass finally: channel.connection = None def establish_connection(self): # creates channel to verify connection. 
# this channel is then used as the next requested channel. # (returned by ``create_channel``). self._avail_channels.append(self.create_channel(self)) return self # for drain events def close_connection(self, connection): self.cycle.close() for l in self._avail_channels, self.channels: while l: try: channel = l.pop() except LookupError: # pragma: no cover pass else: channel.close() def drain_events(self, connection, timeout=None): time_start = monotonic() get = self.cycle.get polling_interval = self.polling_interval while 1: try: get(self._deliver, timeout=timeout) except Empty: if timeout is not None and monotonic() - time_start >= timeout: raise socket.timeout() if polling_interval is not None: sleep(polling_interval) else: break def _deliver(self, message, queue): if not queue: raise KeyError( 'Received message without destination queue: {0}'.format( message)) try: callback = self._callbacks[queue] except KeyError: logger.warn(W_NO_CONSUMERS, queue) self._reject_inbound_message(message) else: callback(message) def _reject_inbound_message(self, raw_message): for channel in self.channels: if channel: message = channel.Message(raw_message, channel=channel) channel.qos.append(message, message.delivery_tag) channel.basic_reject(message.delivery_tag, requeue=True) break def on_message_ready(self, channel, message, queue): if not queue or queue not in self._callbacks: raise KeyError( 'Message for queue {0!r} without consumers: {1}'.format( queue, message)) self._callbacks[queue](message) def _drain_channel(self, channel, callback, timeout=None): return channel.drain_events(callback=callback, timeout=timeout) @property def default_connection_params(self): return {'port': self.default_port, 'hostname': 'localhost'} kombu-4.1.0/kombu/transport/virtual/exchange.py0000644000175000017500000001140313130603207021544 0ustar omeromer00000000000000"""Virtual AMQ Exchange. Implementations of the standard exchanges defined by the AMQ protocol (excluding the `headers` exchange). """ from __future__ import absolute_import, unicode_literals import re from kombu.utils.text import escape_regex class ExchangeType(object): """Base class for exchanges. Implements the specifics for an exchange type. Arguments: channel (ChannelT): AMQ Channel. """ type = None def __init__(self, channel): self.channel = channel def lookup(self, table, exchange, routing_key, default): """Lookup all queues matching `routing_key` in `exchange`. Returns: str: queue name, or 'default' if no queues matched. """ raise NotImplementedError('subclass responsibility') def prepare_bind(self, queue, exchange, routing_key, arguments): """Prepare queue-binding. Returns: Tuple[str, Pattern, str]: of `(routing_key, regex, queue)` to be stored for bindings to this exchange. """ return routing_key, None, queue def equivalent(self, prev, exchange, type, durable, auto_delete, arguments): """Return true if `prev` and `exchange` is equivalent.""" return (type == prev['type'] and durable == prev['durable'] and auto_delete == prev['auto_delete'] and (arguments or {}) == (prev['arguments'] or {})) class DirectExchange(ExchangeType): """Direct exchange. The `direct` exchange routes based on exact routing keys. 
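Example:
    Given the ``(routing_key, pattern, queue)`` table rows used by
    the virtual channels, only exact key matches are returned (a
    sketch; ``channel`` stands for any virtual channel instance)::

        >>> table = [('video', None, 'video.q'), ('image', None, 'image.q')]
        >>> DirectExchange(channel).lookup(table, 'media', 'video', None)
        {'video.q'}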
""" type = 'direct' def lookup(self, table, exchange, routing_key, default): return { queue for rkey, _, queue in table if rkey == routing_key } def deliver(self, message, exchange, routing_key, **kwargs): _lookup = self.channel._lookup _put = self.channel._put for queue in _lookup(exchange, routing_key): _put(queue, message, **kwargs) class TopicExchange(ExchangeType): """Topic exchange. The `topic` exchange routes messages based on words separated by dots, using wildcard characters ``*`` (any single word), and ``#`` (one or more words). """ type = 'topic' #: map of wildcard to regex conversions wildcards = {'*': r'.*?[^\.]', '#': r'.*?'} #: compiled regex cache _compiled = {} def lookup(self, table, exchange, routing_key, default): return { queue for rkey, pattern, queue in table if self._match(pattern, routing_key) } def deliver(self, message, exchange, routing_key, **kwargs): _lookup = self.channel._lookup _put = self.channel._put deadletter = self.channel.deadletter_queue for queue in [q for q in _lookup(exchange, routing_key) if q and q != deadletter]: _put(queue, message, **kwargs) def prepare_bind(self, queue, exchange, routing_key, arguments): return routing_key, self.key_to_pattern(routing_key), queue def key_to_pattern(self, rkey): """Get the corresponding regex for any routing key.""" return '^%s$' % (r'\.'.join( self.wildcards.get(word, word) for word in escape_regex(rkey, '.#*').split('.') )) def _match(self, pattern, string): """Match regular expression (cached). Same as :func:`re.match`, except the regex is compiled and cached, then reused on subsequent matches with the same pattern. """ try: compiled = self._compiled[pattern] except KeyError: compiled = self._compiled[pattern] = re.compile(pattern, re.U) return compiled.match(string) class FanoutExchange(ExchangeType): """Fanout exchange. The `fanout` exchange implements broadcast messaging by delivering copies of all messages to all queues bound to the exchange. To support fanout the virtual channel needs to store the table as shared state. This requires that the `Channel.supports_fanout` attribute is set to true, and the `Channel._queue_bind` and `Channel.get_table` methods are implemented. See Also: the redis backend for an example implementation of these methods. """ type = 'fanout' def lookup(self, table, exchange, routing_key, default): return {queue for _, _, queue in table} def deliver(self, message, exchange, routing_key, **kwargs): if self.channel.supports_fanout: self.channel._put_fanout( exchange, message, routing_key, **kwargs) #: Map of standard exchange types and corresponding classes. 
STANDARD_EXCHANGE_TYPES = { 'direct': DirectExchange, 'topic': TopicExchange, 'fanout': FanoutExchange, } kombu-4.1.0/kombu/transport/virtual/__init__.py0000644000175000017500000000073313130603207021525 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from .base import ( Base64, NotEquivalentError, UndeliverableWarning, BrokerState, QoS, Message, AbstractChannel, Channel, Management, Transport, Empty, binding_key_t, queue_binding_t, ) __all__ = [ 'Base64', 'NotEquivalentError', 'UndeliverableWarning', 'BrokerState', 'QoS', 'Message', 'AbstractChannel', 'Channel', 'Management', 'Transport', 'Empty', 'binding_key_t', 'queue_binding_t', ] kombu-4.1.0/kombu/transport/__init__.py0000644000175000017500000000570013130603207020036 0ustar omeromer00000000000000"""Built-in transports.""" from __future__ import absolute_import, unicode_literals from kombu.five import string_t from kombu.utils.compat import _detect_environment from kombu.utils.imports import symbol_by_name def supports_librabbitmq(): """Return true if :pypi:`librabbitmq` can be used.""" if _detect_environment() == 'default': try: import librabbitmq # noqa except ImportError: # pragma: no cover pass else: # pragma: no cover return True TRANSPORT_ALIASES = { 'amqp': 'kombu.transport.pyamqp:Transport', 'amqps': 'kombu.transport.pyamqp:SSLTransport', 'pyamqp': 'kombu.transport.pyamqp:Transport', 'librabbitmq': 'kombu.transport.librabbitmq:Transport', 'memory': 'kombu.transport.memory:Transport', 'redis': 'kombu.transport.redis:Transport', 'SQS': 'kombu.transport.SQS:Transport', 'sqs': 'kombu.transport.SQS:Transport', 'mongodb': 'kombu.transport.mongodb:Transport', 'zookeeper': 'kombu.transport.zookeeper:Transport', 'sqlalchemy': 'kombu.transport.sqlalchemy:Transport', 'sqla': 'kombu.transport.sqlalchemy:Transport', 'SLMQ': 'kombu.transport.SLMQ.Transport', 'slmq': 'kombu.transport.SLMQ.Transport', 'filesystem': 'kombu.transport.filesystem:Transport', 'qpid': 'kombu.transport.qpid:Transport', 'sentinel': 'kombu.transport.redis:SentinelTransport', 'consul': 'kombu.transport.consul:Transport', 'etcd': 'kombu.transport.etcd:Transport', } _transport_cache = {} def resolve_transport(transport=None): """Get transport by name. Arguments: transport (Union[str, type]): This can be either an actual transport class, or the fully qualified path to a transport class, or the alias of a transport. """ if isinstance(transport, string_t): try: transport = TRANSPORT_ALIASES[transport] except KeyError: if '.' not in transport and ':' not in transport: from kombu.utils.text import fmatch_best alt = fmatch_best(transport, TRANSPORT_ALIASES) if alt: raise KeyError( 'No such transport: {0}. Did you mean {1}?'.format( transport, alt)) raise KeyError('No such transport: {0}'.format(transport)) else: if callable(transport): transport = transport() return symbol_by_name(transport) return transport def get_transport_cls(transport=None): """Get transport class by name. The transport string is the full path to a transport class, e.g.:: "kombu.transport.pyamqp:Transport" If the name does not include `"."` (is not fully qualified), the alias table will be consulted. 
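Example:
    An alias and the fully qualified path resolve to the same class
    (a sketch; the repr shown is illustrative)::

        >>> get_transport_cls('pyamqp')
        <class 'kombu.transport.pyamqp.Transport'>
        >>> get_transport_cls('kombu.transport.pyamqp:Transport')
        <class 'kombu.transport.pyamqp.Transport'>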
""" if transport not in _transport_cache: _transport_cache[transport] = resolve_transport(transport) return _transport_cache[transport] kombu-4.1.0/kombu/transport/pyamqp.py0000644000175000017500000001317713130603207017615 0ustar omeromer00000000000000"""Pure-Python amqp transport.""" from __future__ import absolute_import, unicode_literals import amqp from kombu.five import items from kombu.utils.amq_manager import get_manager from kombu.utils.text import version_string_as_tuple from . import base from .base import to_rabbitmq_queue_arguments DEFAULT_PORT = 5672 DEFAULT_SSL_PORT = 5671 class Message(base.Message): """AMQP Message.""" def __init__(self, msg, channel=None, **kwargs): props = msg.properties super(Message, self).__init__( body=msg.body, channel=channel, delivery_tag=msg.delivery_tag, content_type=props.get('content_type'), content_encoding=props.get('content_encoding'), delivery_info=msg.delivery_info, properties=msg.properties, headers=props.get('application_headers') or {}, **kwargs) class Channel(amqp.Channel, base.StdChannel): """AMQP Channel.""" Message = Message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None, _Message=amqp.Message): """Prepare message so that it can be sent using this transport.""" return _Message( body, priority=priority, content_type=content_type, content_encoding=content_encoding, application_headers=headers, **properties or {} ) def prepare_queue_arguments(self, arguments, **kwargs): return to_rabbitmq_queue_arguments(arguments, **kwargs) def message_to_python(self, raw_message): """Convert encoded message body back to a Python value.""" return self.Message(raw_message, channel=self) class Connection(amqp.Connection): """AMQP Connection.""" Channel = Channel class Transport(base.Transport): """AMQP Transport.""" Connection = Connection default_port = DEFAULT_PORT default_ssl_port = DEFAULT_SSL_PORT # it's very annoying that pyamqp sometimes raises AttributeError # if the connection is lost, but nothing we can do about that here. 
connection_errors = amqp.Connection.connection_errors channel_errors = amqp.Connection.channel_errors recoverable_connection_errors = \ amqp.Connection.recoverable_connection_errors recoverable_channel_errors = amqp.Connection.recoverable_channel_errors driver_name = 'py-amqp' driver_type = 'amqp' implements = base.Transport.implements.extend( async=True, heartbeats=True, ) def __init__(self, client, default_port=None, default_ssl_port=None, **kwargs): self.client = client self.default_port = default_port or self.default_port self.default_ssl_port = default_ssl_port or self.default_ssl_port def driver_version(self): return amqp.__version__ def create_channel(self, connection): return connection.channel() def drain_events(self, connection, **kwargs): return connection.drain_events(**kwargs) def _collect(self, connection): if connection is not None: connection.collect() def establish_connection(self): """Establish connection to the AMQP broker.""" conninfo = self.client for name, default_value in items(self.default_connection_params): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.hostname == 'localhost': conninfo.hostname = '127.0.0.1' opts = dict({ 'host': conninfo.host, 'userid': conninfo.userid, 'password': conninfo.password, 'login_method': conninfo.login_method, 'virtual_host': conninfo.virtual_host, 'insist': conninfo.insist, 'ssl': conninfo.ssl, 'connect_timeout': conninfo.connect_timeout, 'heartbeat': conninfo.heartbeat, }, **conninfo.transport_options or {}) conn = self.Connection(**opts) conn.client = self.client conn.connect() return conn def verify_connection(self, connection): return connection.connected def close_connection(self, connection): """Close the AMQP broker connection.""" connection.client = None connection.close() def get_heartbeat_interval(self, connection): return connection.heartbeat def register_with_event_loop(self, connection, loop): connection.transport.raise_on_initial_eintr = True loop.add_reader(connection.sock, self.on_readable, connection, loop) def heartbeat_check(self, connection, rate=2): return connection.heartbeat_tick(rate=rate) def qos_semantics_matches_spec(self, connection): props = connection.server_properties if props.get('product') == 'RabbitMQ': return version_string_as_tuple(props['version']) < (3, 3) return True @property def default_connection_params(self): return { 'userid': 'guest', 'password': 'guest', 'port': (self.default_ssl_port if self.client.ssl else self.default_port), 'hostname': 'localhost', 'login_method': 'AMQPLAIN', } def get_manager(self, *args, **kwargs): return get_manager(self.client, *args, **kwargs) class SSLTransport(Transport): """AMQP SSL Transport.""" def __init__(self, *args, **kwargs): super(SSLTransport, self).__init__(*args, **kwargs) # ugh, not exactly pure, but hey, it's python. self.client.ssl = True kombu-4.1.0/kombu/transport/etcd.py0000644000175000017500000002013013130603207017210 0ustar omeromer00000000000000"""Etcd Transport. It uses Etcd as a store to transport messages in Queues It uses python-etcd for talking to Etcd's HTTP API """ from __future__ import absolute_import, unicode_literals import os import socket from collections import defaultdict from contextlib import contextmanager from kombu.exceptions import ChannelError from kombu.five import Empty from kombu.log import get_logger from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from . 
import virtual try: import etcd except ImportError: etcd = None logger = get_logger('kombu.transport.etcd') DEFAULT_PORT = 2379 DEFAULT_HOST = 'localhost' class Channel(virtual.Channel): """Etcd Channel class which talks to Etcd.""" prefix = 'kombu' index = None timeout = 10 session_ttl = 30 lock_ttl = 10 def __init__(self, *args, **kwargs): if etcd is None: raise ImportError('Missing python-etcd library') super(Channel, self).__init__(*args, **kwargs) port = self.connection.client.port or self.connection.default_port host = self.connection.client.hostname or DEFAULT_HOST logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout) self.queues = defaultdict(dict) self.client = etcd.Client(host=host, port=int(port)) def _key_prefix(self, queue): """Create and return the `queue` with the proper prefix. Arguments: queue (str): The name of the queue. """ return '{0}/{1}'.format(self.prefix, queue) @contextmanager def _queue_lock(self, queue): """Try to acquire a lock on the Queue. It does so by creating an object called 'lock' which is locked by the current session. This way other nodes are not able to write to the lock object which means that they have to wait before the lock is released. Arguments: queue (str): The name of the queue. """ lock = etcd.Lock(self.client, queue) lock._uuid = self.lock_value logger.debug('Acquiring lock {0}'.format(lock.name)) lock.acquire(blocking=True, lock_ttl=self.lock_ttl) try: yield finally: logger.debug('Releasing lock {0}'.format(lock.name)) lock.release() def _new_queue(self, queue, **_): """Create a new `queue` if the `queue` doesn't already exist. Arguments: queue (str): The name of the queue. """ self.queues[queue] = queue with self._queue_lock(queue): try: return self.client.write( key=self._key_prefix(queue), dir=True, value=None) except etcd.EtcdNotFile: logger.debug('Queue "{0}" already exists'.format(queue)) return self.client.read(key=self._key_prefix(queue)) def _has_queue(self, queue, **kwargs): """Verify that queue exists. Returns: bool: Should return :const:`True` if the queue exists or :const:`False` otherwise. """ try: self.client.read(self._key_prefix(queue)) return True except etcd.EtcdKeyNotFound: return False def _delete(self, queue, *args, **_): """Delete a `queue`. Arguments: queue (str): The name of the queue. """ self.queues.pop(queue, None) self._purge(queue) def _put(self, queue, payload, **_): """Put `message` onto `queue`. This simply writes a key to the Etcd store. Arguments: queue (str): The name of the queue. payload (dict): Message data which will be dumped to etcd. """ with self._queue_lock(queue): key = self._key_prefix(queue) if not self.client.write( key=key, value=dumps(payload), append=True): raise ChannelError('Cannot add key {0!r} to etcd'.format(key)) def _get(self, queue, timeout=None): """Get the first available message from the queue. Before it does so it acquires a lock on the store so only one node reads at the same time. This is for read consistency. Arguments: queue (str): The name of the queue. timeout (int): Optional seconds to wait for a response.
""" with self._queue_lock(queue): key = self._key_prefix(queue) logger.debug('Fetching key %s with index %s', key, self.index) try: result = self.client.read( key=key, recursive=True, index=self.index, timeout=self.timeout) if result is None: raise Empty() item = result._children[-1] logger.debug('Removing key {0}'.format(item['key'])) msg_content = loads(item['value']) self.client.delete(key=item['key']) return msg_content except (TypeError, IndexError, etcd.EtcdException) as error: logger.debug('_get failed: {0}:{1}'.format(type(error), error)) raise Empty() def _purge(self, queue): """Remove all `message`s from a `queue`. Arguments: queue (str): The name of the queue. """ with self._queue_lock(queue): key = self._key_prefix(queue) logger.debug('Purging queue at key {0}'.format(key)) return self.client.delete(key=key, recursive=True) def _size(self, queue): """Return the size of the `queue`. Arguments: queue (str): The name of the queue. """ with self._queue_lock(queue): size = 0 try: key = self._key_prefix(queue) logger.debug('Fetching key recursively %s with index %s', key, self.index) result = self.client.read( key=key, recursive=True, index=self.index) size = len(result._children) except TypeError: pass logger.debug('Found %s keys under %s with index %s', size, key, self.index) return size @cached_property def lock_value(self): return '{0}.{1}'.format(socket.gethostname(), os.getpid()) class Transport(virtual.Transport): """Etcd storage Transport for Kombu.""" Channel = Channel default_port = DEFAULT_PORT driver_type = 'etcd' driver_name = 'python-etcd' polling_interval = 3 implements = virtual.Transport.implements.extend( exchange_type=frozenset(['direct'])) def __init__(self, *args, **kwargs): """Create a new instance of etcd.Transport.""" if etcd is None: raise ImportError('Missing python-etcd library') super(Transport, self).__init__(*args, **kwargs) self.connection_errors = ( virtual.Transport.connection_errors + (etcd.EtcdException, ) ) self.channel_errors = ( virtual.Transport.channel_errors + (etcd.EtcdException, ) ) def verify_connection(self, connection): """Verify the connection works.""" port = connection.client.port or self.default_port host = connection.client.hostname or DEFAULT_HOST logger.debug('Verify Etcd connection to %s:%s', host, port) try: etcd.Client(host=host, port=int(port)) return True except ValueError: pass return False def driver_version(self): """Return the version of the etcd library. .. note:: python-etcd has no __version__. This is a workaround. """ try: import pip.commands.freeze for x in pip.commands.freeze.freeze(): if x.startswith('python-etcd'): return x.split('==')[1] except (ImportError, IndexError): logger.warn('Unable to find the python-etcd version.') return 'Unknown' kombu-4.1.0/kombu/transport/memory.py0000644000175000017500000000353713130603207017615 0ustar omeromer00000000000000"""In-memory transport.""" from __future__ import absolute_import, unicode_literals from kombu.five import Queue, values from . import base from . 
import virtual class Channel(virtual.Channel): """In-memory Channel.""" queues = {} do_restore = False supports_fanout = True def _has_queue(self, queue, **kwargs): return queue in self.queues def _new_queue(self, queue, **kwargs): if queue not in self.queues: self.queues[queue] = Queue() def _get(self, queue, timeout=None): return self._queue_for(queue).get(block=False) def _queue_for(self, queue): if queue not in self.queues: self.queues[queue] = Queue() return self.queues[queue] def _queue_bind(self, *args): pass def _put_fanout(self, exchange, message, routing_key=None, **kwargs): for queue in self._lookup(exchange, routing_key): self._queue_for(queue).put(message) def _put(self, queue, message, **kwargs): self._queue_for(queue).put(message) def _size(self, queue): return self._queue_for(queue).qsize() def _delete(self, queue, *args, **kwargs): self.queues.pop(queue, None) def _purge(self, queue): q = self._queue_for(queue) size = q.qsize() q.queue.clear() return size def close(self): super(Channel, self).close() for queue in values(self.queues): queue.empty() self.queues = {} def after_reply_message_received(self, queue): pass class Transport(virtual.Transport): """In-memory Transport.""" Channel = Channel #: memory backend state is global. state = virtual.BrokerState() implements = base.Transport.implements driver_type = 'memory' driver_name = 'memory' def driver_version(self): return 'N/A' kombu-4.1.0/kombu/transport/zookeeper.py0000644000175000017500000001372413130603207020307 0ustar omeromer00000000000000"""Zookeeper transport. :copyright: (c) 2010 - 2013 by Mahendra M. :license: BSD, see LICENSE for more details. **Synopsis** Connects to a zookeeper node as server:port/vhost. The vhost becomes the base for all the other znodes. So we can use it like a vhost. This uses the built-in kazoo recipe for queues. **References** - https://zookeeper.apache.org/doc/trunk/recipes.html#sc_recipes_Queues - https://kazoo.readthedocs.io/en/latest/api/recipe/queue.html **Limitations** This queue does not offer reliable consumption. An entry is removed from the queue prior to being processed. So if an error occurs, the consumer has to re-queue the item or it will be lost. """ from __future__ import absolute_import, unicode_literals import os import socket from kombu.five import Empty from kombu.utils.encoding import bytes_to_str, ensure_bytes from kombu.utils.json import dumps, loads from .
import virtual try: import kazoo from kazoo.client import KazooClient from kazoo.recipe.queue import Queue KZ_CONNECTION_ERRORS = ( kazoo.exceptions.SystemErrorException, kazoo.exceptions.ConnectionLossException, kazoo.exceptions.MarshallingErrorException, kazoo.exceptions.UnimplementedException, kazoo.exceptions.OperationTimeoutException, kazoo.exceptions.NoAuthException, kazoo.exceptions.InvalidACLException, kazoo.exceptions.AuthFailedException, kazoo.exceptions.SessionExpiredException, ) KZ_CHANNEL_ERRORS = ( kazoo.exceptions.RuntimeInconsistencyException, kazoo.exceptions.DataInconsistencyException, kazoo.exceptions.BadArgumentsException, kazoo.exceptions.MarshallingErrorException, kazoo.exceptions.UnimplementedException, kazoo.exceptions.OperationTimeoutException, kazoo.exceptions.ApiErrorException, kazoo.exceptions.NoNodeException, kazoo.exceptions.NoAuthException, kazoo.exceptions.NodeExistsException, kazoo.exceptions.NoChildrenForEphemeralsException, kazoo.exceptions.NotEmptyException, kazoo.exceptions.SessionExpiredException, kazoo.exceptions.InvalidCallbackException, socket.error, ) except ImportError: kazoo = None # noqa KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () # noqa DEFAULT_PORT = 2181 __author__ = 'Mahendra M ' class Channel(virtual.Channel): """Zookeeper Channel.""" _client = None _queues = {} def __init__(self, connection, **kwargs): super(Channel, self).__init__(connection, **kwargs) vhost = self.connection.client.virtual_host self._vhost = '/{}'.format(vhost.strip('/')) def _get_path(self, queue_name): return os.path.join(self._vhost, queue_name) def _get_queue(self, queue_name): queue = self._queues.get(queue_name, None) if queue is None: queue = Queue(self.client, self._get_path(queue_name)) self._queues[queue_name] = queue # Ensure that the queue is created len(queue) return queue def _put(self, queue, message, **kwargs): return self._get_queue(queue).put( ensure_bytes(dumps(message)), priority=self._get_message_priority(message, reverse=True), ) def _get(self, queue): queue = self._get_queue(queue) msg = queue.get() if msg is None: raise Empty() return loads(bytes_to_str(msg)) def _purge(self, queue): count = 0 queue = self._get_queue(queue) while True: msg = queue.get() if msg is None: break count += 1 return count def _delete(self, queue, *args, **kwargs): if self._has_queue(queue): self._purge(queue) self.client.delete(self._get_path(queue)) def _size(self, queue): queue = self._get_queue(queue) return len(queue) def _new_queue(self, queue, **kwargs): if not self._has_queue(queue): queue = self._get_queue(queue) def _has_queue(self, queue): return self.client.exists(self._get_path(queue)) is not None def _open(self): conninfo = self.connection.client hosts = [] if conninfo.alt: for host_port in conninfo.alt: if host_port.startswith('zookeeper://'): host_port = host_port[len('zookeeper://'):] if not host_port: continue try: host, port = host_port.split(':', 1) host_port = (host, int(port)) except ValueError: if host_port == conninfo.hostname: host_port = (host_port, conninfo.port or DEFAULT_PORT) else: host_port = (host_port, DEFAULT_PORT) hosts.append(host_port) host_port = (conninfo.hostname, conninfo.port or DEFAULT_PORT) if host_port not in hosts: hosts.insert(0, host_port) conn_str = ','.join(['%s:%s' % (h, p) for h, p in hosts]) conn = KazooClient(conn_str) conn.start() return conn @property def client(self): if self._client is None: self._client = self._open() return self._client class Transport(virtual.Transport): """Zookeeper Transport.""" Channel = 
Channel polling_interval = 1 default_port = DEFAULT_PORT connection_errors = ( virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS ) channel_errors = ( virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS ) driver_type = 'zookeeper' driver_name = 'kazoo' def __init__(self, *args, **kwargs): if kazoo is None: raise ImportError('The kazoo library is not installed') super(Transport, self).__init__(*args, **kwargs) def driver_version(self): return kazoo.__version__ kombu-4.1.0/kombu/transport/redis.py0000644000175000017500000011337213134153527017423 0ustar omeromer00000000000000"""Redis transport.""" from __future__ import absolute_import, unicode_literals import numbers import socket from bisect import bisect from collections import namedtuple from contextlib import contextmanager from time import time from vine import promise from kombu.exceptions import InconsistencyError, VersionMismatch from kombu.five import Empty, values, string_t from kombu.log import get_logger from kombu.utils.compat import register_after_fork from kombu.utils.eventio import poll, READ, ERR from kombu.utils.encoding import bytes_to_str from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from kombu.utils.scheduling import cycle_by_name from kombu.utils.url import _parse_url from kombu.utils.uuid import uuid from . import virtual try: import redis except ImportError: # pragma: no cover redis = None # noqa try: from redis import sentinel except ImportError: # pragma: no cover sentinel = None # noqa logger = get_logger('kombu.transport.redis') crit, warn = logger.critical, logger.warn DEFAULT_PORT = 6379 DEFAULT_DB = 0 PRIORITY_STEPS = [0, 3, 6, 9] error_classes_t = namedtuple('error_classes_t', ( 'connection_errors', 'channel_errors', )) NO_ROUTE_ERROR = """ Cannot route message for exchange {0!r}: Table empty or key no longer exists. Probably the key ({1!r}) has been removed from the Redis database. """ # This implementation may seem overly complex, but I assure you there is # a good reason for doing it this way. # # Consuming from several connections enables us to emulate channels, # which means we can have different service guarantees for individual # channels. # # So we need to consume messages from multiple connections simultaneously, # and using epoll means we don't have to do so using multiple threads. # # Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout # exchanges (broadcast), as an alternative to pushing messages to fanout-bound # queues manually. 
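# Illustrative usage sketch (not part of kombu itself): this transport is
# normally driven through kombu's public API rather than used directly.
# Assuming a Redis server is listening on localhost:6379 (an assumption
# made for this example), a publish/consume round trip looks roughly
# like::
#
#     from kombu import Connection
#
#     with Connection('redis://localhost:6379/0') as conn:
#         queue = conn.SimpleQueue('example')
#         queue.put({'hello': 'world'})
#         message = queue.get(block=True, timeout=1)
#         message.ack()
#         queue.close()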
def get_redis_error_classes(): """Return tuple of redis error classes.""" from redis import exceptions # This exception suddenly changed name between redis-py versions if hasattr(exceptions, 'InvalidData'): DataError = exceptions.InvalidData else: DataError = exceptions.DataError return error_classes_t( (virtual.Transport.connection_errors + ( InconsistencyError, socket.error, IOError, OSError, exceptions.ConnectionError, exceptions.AuthenticationError, exceptions.TimeoutError)), (virtual.Transport.channel_errors + ( DataError, exceptions.InvalidResponse, exceptions.ResponseError)), ) def get_redis_ConnectionError(): """Return the redis ConnectionError exception class.""" from redis import exceptions return exceptions.ConnectionError class MutexHeld(Exception): """Raised when another party holds the lock.""" @contextmanager def Mutex(client, name, expire): """The Redis lock implementation (probably shaky).""" lock_id = uuid() i_won = client.setnx(name, lock_id) try: if i_won: client.expire(name, expire) yield else: if not client.ttl(name): client.expire(name, expire) raise MutexHeld() finally: if i_won: try: with client.pipeline(True) as pipe: pipe.watch(name) if pipe.get(name) == lock_id: pipe.multi() pipe.delete(name) pipe.execute() pipe.unwatch() except redis.WatchError: pass def _after_fork_cleanup_channel(channel): channel._after_fork() class QoS(virtual.QoS): """Redis Ack Emulation.""" restore_at_shutdown = True def __init__(self, *args, **kwargs): super(QoS, self).__init__(*args, **kwargs) self._vrestore_count = 0 def append(self, message, delivery_tag): delivery = message.delivery_info EX, RK = delivery['exchange'], delivery['routing_key'] with self.pipe_or_acquire() as pipe: pipe.zadd(self.unacked_index_key, time(), delivery_tag) \ .hset(self.unacked_key, delivery_tag, dumps([message._raw, EX, RK])) \ .execute() super(QoS, self).append(message, delivery_tag) def restore_unacked(self, client=None): with self.channel.conn_or_acquire(client) as client: for tag in self._delivered: self.restore_by_tag(tag, client=client) self._delivered.clear() def ack(self, delivery_tag): self._remove_from_indices(delivery_tag).execute() super(QoS, self).ack(delivery_tag) def reject(self, delivery_tag, requeue=False): if requeue: self.restore_by_tag(delivery_tag, leftmost=True) self.ack(delivery_tag) @contextmanager def pipe_or_acquire(self, pipe=None, client=None): if pipe: yield pipe else: with self.channel.conn_or_acquire(client) as client: yield client.pipeline() def _remove_from_indices(self, delivery_tag, pipe=None): with self.pipe_or_acquire(pipe) as pipe: return pipe.zrem(self.unacked_index_key, delivery_tag) \ .hdel(self.unacked_key, delivery_tag) def restore_visible(self, start=0, num=10, interval=10): self._vrestore_count += 1 if (self._vrestore_count - 1) % interval: return with self.channel.conn_or_acquire() as client: ceil = time() - self.visibility_timeout try: with Mutex(client, self.unacked_mutex_key, self.unacked_mutex_expire): visible = client.zrevrangebyscore( self.unacked_index_key, ceil, 0, start=num and start, num=num, withscores=True) for tag, score in visible or []: self.restore_by_tag(tag, client) except MutexHeld: pass def restore_by_tag(self, tag, client=None, leftmost=False): with self.channel.conn_or_acquire(client) as client: with client.pipeline() as pipe: p, _, _ = self._remove_from_indices( tag, pipe.hget(self.unacked_key, tag)).execute() if p: M, EX, RK = loads(bytes_to_str(p)) # json is unicode self.channel._do_restore_message(M, EX, RK, client, leftmost) 
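    # The cached properties below proxy the channel's ack-emulation
    # settings: unacked messages live in a Redis hash (unacked_key) plus
    # a sorted set keyed by delivery time (unacked_index_key), which is
    # what restore_visible() scans for messages whose visibility_timeout
    # has expired.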
@cached_property def unacked_key(self): return self.channel.unacked_key @cached_property def unacked_index_key(self): return self.channel.unacked_index_key @cached_property def unacked_mutex_key(self): return self.channel.unacked_mutex_key @cached_property def unacked_mutex_expire(self): return self.channel.unacked_mutex_expire @cached_property def visibility_timeout(self): return self.channel.visibility_timeout class MultiChannelPoller(object): """Async I/O poller for Redis transport.""" eventflags = READ | ERR #: Set by :meth:`get` while reading from the socket. _in_protected_read = False #: Set of one-shot callbacks to call after reading from socket. after_read = None def __init__(self): # active channels self._channels = set() # file descriptor -> channel map. self._fd_to_chan = {} # channel -> socket map self._chan_to_sock = {} # poll implementation (epoll/kqueue/select) self.poller = poll() # one-shot callbacks called after reading from socket. self.after_read = set() def close(self): for fd in values(self._chan_to_sock): try: self.poller.unregister(fd) except (KeyError, ValueError): pass self._channels.clear() self._fd_to_chan.clear() self._chan_to_sock.clear() def add(self, channel): self._channels.add(channel) def discard(self, channel): self._channels.discard(channel) def _on_connection_disconnect(self, connection): try: self.poller.unregister(connection._sock) except (AttributeError, TypeError): pass def _register(self, channel, client, type): if (channel, client, type) in self._chan_to_sock: self._unregister(channel, client, type) if client.connection._sock is None: # not connected yet. client.connection.connect() sock = client.connection._sock self._fd_to_chan[sock.fileno()] = (channel, type) self._chan_to_sock[(channel, client, type)] = sock self.poller.register(sock, self.eventflags) def _unregister(self, channel, client, type): self.poller.unregister(self._chan_to_sock[(channel, client, type)]) def _client_registered(self, channel, client, cmd): if getattr(client, 'connection', None) is None: client.connection = client.connection_pool.get_connection('_') return (client.connection._sock is not None and (channel, client, cmd) in self._chan_to_sock) def _register_BRPOP(self, channel): """Enable BRPOP mode for channel.""" ident = channel, channel.client, 'BRPOP' if not self._client_registered(channel, channel.client, 'BRPOP'): channel._in_poll = False self._register(*ident) if not channel._in_poll: # send BRPOP channel._brpop_start() def _register_LISTEN(self, channel): """Enable LISTEN mode for channel.""" if not self._client_registered(channel, channel.subclient, 'LISTEN'): channel._in_listen = False self._register(channel, channel.subclient, 'LISTEN') if not channel._in_listen: channel._subscribe() # send SUBSCRIBE def on_poll_start(self): for channel in self._channels: if channel.active_queues: # BRPOP mode? if channel.qos.can_consume(): self._register_BRPOP(channel) if channel.active_fanout_queues: # LISTEN mode? self._register_LISTEN(channel) def on_poll_init(self, poller): self.poller = poller for channel in self._channels: return channel.qos.restore_visible( num=channel.unacked_restore_limit, ) def maybe_restore_messages(self): for channel in self._channels: if channel.active_queues: # only need to do this once, as they are not local to channel. 
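                # restore_visible() serializes itself through the
                # Redis-side Mutex above (unacked_mutex_key), so
                # concurrent workers cannot double-restore the same
                # unacked messages.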
return channel.qos.restore_visible( num=channel.unacked_restore_limit, ) def on_readable(self, fileno): chan, type = self._fd_to_chan[fileno] if chan.qos.can_consume(): chan.handlers[type]() def handle_event(self, fileno, event): if event & READ: return self.on_readable(fileno), self elif event & ERR: chan, type = self._fd_to_chan[fileno] chan._poll_error(type) def get(self, callback, timeout=None): self._in_protected_read = True try: for channel in self._channels: if channel.active_queues: # BRPOP mode? if channel.qos.can_consume(): self._register_BRPOP(channel) if channel.active_fanout_queues: # LISTEN mode? self._register_LISTEN(channel) events = self.poller.poll(timeout) if events: for fileno, event in events: ret = self.handle_event(fileno, event) if ret: return # - no new data, so try to restore messages. # - reset active redis commands. self.maybe_restore_messages() raise Empty() finally: self._in_protected_read = False while self.after_read: try: fun = self.after_read.pop() except KeyError: break else: fun() @property def fds(self): return self._fd_to_chan class Channel(virtual.Channel): """Redis Channel.""" QoS = QoS _client = None _subclient = None _closing = False supports_fanout = True keyprefix_queue = '_kombu.binding.%s' keyprefix_fanout = '/{db}.' sep = '\x06\x16' _in_poll = False _in_listen = False _fanout_queues = {} ack_emulation = True unacked_key = 'unacked' unacked_index_key = 'unacked_index' unacked_mutex_key = 'unacked_mutex' unacked_mutex_expire = 300 # 5 minutes unacked_restore_limit = None visibility_timeout = 3600 # 1 hour priority_steps = PRIORITY_STEPS socket_timeout = None socket_connect_timeout = None socket_keepalive = None socket_keepalive_options = None max_connections = 10 #: Transport option to disable fanout keyprefix. #: Can also be string, in which case it changes the default #: prefix ('/{db}.') into to something else. The prefix must #: include a leading slash and a trailing dot. #: #: Enabled by default since Kombu 4.x. #: Disable for backwards compatibility with Kombu 3.x. fanout_prefix = True #: If enabled the fanout exchange will support patterns in routing #: and binding keys (like a topic exchange but using PUB/SUB). #: #: Enabled by default since Kombu 4.x. #: Disable for backwards compatibility with Kombu 3.x. fanout_patterns = True #: Order in which we consume from queues. #: #: Can be either string alias, or a cycle strategy class #: #: - ``round_robin`` #: (:class:`~kombu.utils.scheduling.round_robin_cycle`). #: #: Make sure each queue has an equal opportunity to be consumed from. #: #: - ``sorted`` #: (:class:`~kombu.utils.scheduling.sorted_cycle`). #: #: Consume from queues in alphabetical order. #: If the first queue in the sorted list always contains messages, #: then the rest of the queues will never be consumed from. #: #: - ``priority`` #: (:class:`~kombu.utils.scheduling.priority_cycle`). #: #: Consume from queues in original order, so that if the first #: queue always contains messages, the rest of the queues #: in the list will never be consumed from. #: #: The default is to consume from queues in round robin. 
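    #:
    #: For example (illustrative), the strategy can be selected per
    #: connection with
    #: ``transport_options={'queue_order_strategy': 'priority'}``.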
queue_order_strategy = 'round_robin' _async_pool = None _pool = None from_transport_options = ( virtual.Channel.from_transport_options + ('ack_emulation', 'unacked_key', 'unacked_index_key', 'unacked_mutex_key', 'unacked_mutex_expire', 'visibility_timeout', 'unacked_restore_limit', 'fanout_prefix', 'fanout_patterns', 'socket_timeout', 'socket_connect_timeout', 'socket_keepalive', 'socket_keepalive_options', 'queue_order_strategy', 'max_connections', 'priority_steps') # <-- do not add comma here! ) connection_class = redis.Connection if redis else None def __init__(self, *args, **kwargs): super_ = super(Channel, self) super_.__init__(*args, **kwargs) if not self.ack_emulation: # disable visibility timeout self.QoS = virtual.QoS self._queue_cycle = cycle_by_name(self.queue_order_strategy)() self.Client = self._get_client() self.ResponseError = self._get_response_error() self.active_fanout_queues = set() self.auto_delete_queues = set() self._fanout_to_queue = {} self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive} if self.fanout_prefix: if isinstance(self.fanout_prefix, string_t): self.keyprefix_fanout = self.fanout_prefix else: # previous versions did not set a fanout, so cannot enable # by default. self.keyprefix_fanout = '' # Evaluate connection. try: self.client.ping() except Exception: self._disconnect_pools() raise self.connection.cycle.add(self) # add to channel poller. # copy errors, in case channel closed but threads still # are still waiting for data. self.connection_errors = self.connection.connection_errors if register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_channel) def _after_fork(self): self._disconnect_pools() def _disconnect_pools(self): pool = self._pool async_pool = self._async_pool self._async_pool = self._pool = None if pool is not None: pool.disconnect() if async_pool is not None: async_pool.disconnect() def _on_connection_disconnect(self, connection): if self._in_poll is connection: self._in_poll = None if self._in_listen is connection: self._in_listen = None if self.connection and self.connection.cycle: self.connection.cycle._on_connection_disconnect(connection) def _do_restore_message(self, payload, exchange, routing_key, client=None, leftmost=False): with self.conn_or_acquire(client) as client: try: try: payload['headers']['redelivered'] = True except KeyError: pass for queue in self._lookup(exchange, routing_key): (client.lpush if leftmost else client.rpush)( queue, dumps(payload), ) except Exception: crit('Could not restore message: %r', payload, exc_info=True) def _restore(self, message, leftmost=False): if not self.ack_emulation: return super(Channel, self)._restore(message) tag = message.delivery_tag with self.conn_or_acquire() as client: with client.pipeline() as pipe: P, _ = pipe.hget(self.unacked_key, tag) \ .hdel(self.unacked_key, tag) \ .execute() if P: M, EX, RK = loads(bytes_to_str(P)) # json is unicode self._do_restore_message(M, EX, RK, client, leftmost) def _restore_at_beginning(self, message): return self._restore(message, leftmost=True) def basic_consume(self, queue, *args, **kwargs): if queue in self._fanout_queues: exchange, _ = self._fanout_queues[queue] self.active_fanout_queues.add(queue) self._fanout_to_queue[exchange] = queue ret = super(Channel, self).basic_consume(queue, *args, **kwargs) # Update fair cycle between queues. # # We cycle between queues fairly to make sure that # each queue is equally likely to be consumed from, # so that a very busy queue will not block others. 
# # This works by using Redis's `BRPOP` command and # by rotating the most recently used queue to the # end of the list. See Kombu github issue #166 for # more discussion of this method. self._update_queue_cycle() return ret def basic_cancel(self, consumer_tag): # If we are busy reading messages we may experience # a race condition where a message is consumed after # canceling, so we must delay this operation until reading # is complete (Issue celery/celery#1773). connection = self.connection if connection: if connection.cycle._in_protected_read: return connection.cycle.after_read.add( promise(self._basic_cancel, (consumer_tag,)), ) return self._basic_cancel(consumer_tag) def _basic_cancel(self, consumer_tag): try: queue = self._tag_to_queue[consumer_tag] except KeyError: return try: self.active_fanout_queues.remove(queue) except KeyError: pass else: self._unsubscribe_from(queue) try: exchange, _ = self._fanout_queues[queue] self._fanout_to_queue.pop(exchange) except KeyError: pass ret = super(Channel, self).basic_cancel(consumer_tag) self._update_queue_cycle() return ret def _get_publish_topic(self, exchange, routing_key): if routing_key and self.fanout_patterns: return ''.join([self.keyprefix_fanout, exchange, '/', routing_key]) return ''.join([self.keyprefix_fanout, exchange]) def _get_subscribe_topic(self, queue): exchange, routing_key = self._fanout_queues[queue] return self._get_publish_topic(exchange, routing_key) def _subscribe(self): keys = [self._get_subscribe_topic(queue) for queue in self.active_fanout_queues] if not keys: return c = self.subclient if c.connection._sock is None: c.connection.connect() self._in_listen = c.connection c.psubscribe(keys) def _unsubscribe_from(self, queue): topic = self._get_subscribe_topic(queue) c = self.subclient if c.connection and c.connection._sock: c.unsubscribe([topic]) def _handle_message(self, client, r): if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0: client.subscribed = False return if bytes_to_str(r[0]) == 'pmessage': type, pattern, channel, data = r[0], r[1], r[2], r[3] else: type, pattern, channel, data = r[0], None, r[1], r[2] return { 'type': type, 'pattern': pattern, 'channel': channel, 'data': data, } def _receive(self): c = self.subclient ret = [] try: ret.append(self._receive_one(c)) except Empty: pass if c.connection is not None: while c.connection.can_read(timeout=0): ret.append(self._receive_one(c)) return any(ret) def _receive_one(self, c): response = None try: response = c.parse_response() except self.connection_errors: self._in_listen = None raise if response is not None: payload = self._handle_message(c, response) if bytes_to_str(payload['type']).endswith('message'): channel = bytes_to_str(payload['channel']) if payload['data']: if channel[0] == '/': _, _, channel = channel.partition('.') try: message = loads(bytes_to_str(payload['data'])) except (TypeError, ValueError): warn('Cannot process event on channel %r: %s', channel, repr(payload)[:4096], exc_info=1) raise Empty() exchange = channel.split('/', 1)[0] self.connection._deliver( message, self._fanout_to_queue[exchange]) return True def _brpop_start(self, timeout=1): queues = self._queue_cycle.consume(len(self.active_queues)) if not queues: return keys = [self._q_for_pri(queue, pri) for pri in self.priority_steps for queue in queues] + [timeout or 0] self._in_poll = self.client.connection self.client.connection.send_command('BRPOP', *keys) def _brpop_read(self, **options): try: try: dest__item = self.client.parse_response(self.client.connection, 'BRPOP',
**options) except self.connection_errors: # if there's a ConnectionError, disconnect so the next # iteration will reconnect automatically. self.client.connection.disconnect() raise if dest__item: dest, item = dest__item dest = bytes_to_str(dest).rsplit(self.sep, 1)[0] self._queue_cycle.rotate(dest) self.connection._deliver(loads(bytes_to_str(item)), dest) return True else: raise Empty() finally: self._in_poll = None def _poll_error(self, type, **options): if type == 'LISTEN': self.subclient.parse_response() else: self.client.parse_response(self.client.connection, type) def _get(self, queue): with self.conn_or_acquire() as client: for pri in self.priority_steps: item = client.rpop(self._q_for_pri(queue, pri)) if item: return loads(bytes_to_str(item)) raise Empty() def _size(self, queue): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.llen(self._q_for_pri(queue, pri)) sizes = pipe.execute() return sum(size for size in sizes if isinstance(size, numbers.Integral)) def _q_for_pri(self, queue, pri): pri = self.priority(pri) return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', '')) def priority(self, n): steps = self.priority_steps return steps[bisect(steps, n) - 1] def _put(self, queue, message, **kwargs): """Deliver message.""" pri = self._get_message_priority(message, reverse=False) with self.conn_or_acquire() as client: client.lpush(self._q_for_pri(queue, pri), dumps(message)) def _put_fanout(self, exchange, message, routing_key, **kwargs): """Deliver fanout message.""" with self.conn_or_acquire() as client: client.publish( self._get_publish_topic(exchange, routing_key), dumps(message), ) def _new_queue(self, queue, auto_delete=False, **kwargs): if auto_delete: self.auto_delete_queues.add(queue) def _queue_bind(self, exchange, routing_key, pattern, queue): if self.typeof(exchange).type == 'fanout': # Mark exchange as fanout. self._fanout_queues[queue] = ( exchange, routing_key.replace('#', '*'), ) with self.conn_or_acquire() as client: client.sadd(self.keyprefix_queue % (exchange,), self.sep.join([routing_key or '', pattern or '', queue or ''])) def _delete(self, queue, exchange, routing_key, pattern, *args, **kwargs): self.auto_delete_queues.discard(queue) with self.conn_or_acquire(client=kwargs.get('client')) as client: client.srem(self.keyprefix_queue % (exchange,), self.sep.join([routing_key or '', pattern or '', queue or ''])) with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.delete(self._q_for_pri(queue, pri)) pipe.execute() def _has_queue(self, queue, **kwargs): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: pipe = pipe.exists(self._q_for_pri(queue, pri)) return any(pipe.execute()) def get_table(self, exchange): key = self.keyprefix_queue % exchange with self.conn_or_acquire() as client: values = client.smembers(key) if not values: raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key)) return [tuple(bytes_to_str(val).split(self.sep)) for val in values] def _purge(self, queue): with self.conn_or_acquire() as client: with client.pipeline() as pipe: for pri in self.priority_steps: priq = self._q_for_pri(queue, pri) pipe = pipe.llen(priq).delete(priq) sizes = pipe.execute() return sum(sizes[::2]) def close(self): self._closing = True if not self.closed: # remove from channel poller. 
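        # (the poller is shared by all channels of this transport; see
        # MultiChannelPoller.add()/discard() above)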
self.connection.cycle.discard(self) # delete fanout bindings client = self.__dict__.get('client') # only if property cached if client is not None: for queue in self._fanout_queues: if queue in self.auto_delete_queues: self.queue_delete(queue, client=client) self._disconnect_pools() self._close_clients() super(Channel, self).close() def _close_clients(self): # Close connections for attr in 'client', 'subclient': try: client = self.__dict__[attr] connection, client.connection = client.connection, None connection.disconnect() except (KeyError, AttributeError, self.ResponseError): pass def _prepare_virtual_host(self, vhost): if not isinstance(vhost, numbers.Integral): if not vhost or vhost == '/': vhost = DEFAULT_DB elif vhost.startswith('/'): vhost = vhost[1:] try: vhost = int(vhost) except ValueError: raise ValueError( 'Database is int between 0 and limit - 1, not {0}'.format( vhost, )) return vhost def _filter_tcp_connparams(self, socket_keepalive=None, socket_keepalive_options=None, **params): return params def _connparams(self, async=False): conninfo = self.connection.client connparams = { 'host': conninfo.hostname or '127.0.0.1', 'port': conninfo.port or self.connection.default_port, 'virtual_host': conninfo.virtual_host, 'password': conninfo.password, 'max_connections': self.max_connections, 'socket_timeout': self.socket_timeout, 'socket_connect_timeout': self.socket_connect_timeout, 'socket_keepalive': self.socket_keepalive, 'socket_keepalive_options': self.socket_keepalive_options, } if conninfo.ssl: # Connection(ssl={}) must be a dict containing the keys: # 'ssl_cert_reqs', 'ssl_ca_certs', 'ssl_certfile', 'ssl_keyfile' try: connparams.update(conninfo.ssl) connparams['connection_class'] = redis.SSLConnection except TypeError: pass host = connparams['host'] if '://' in host: scheme, _, _, _, password, path, query = _parse_url(host) if scheme == 'socket': connparams = self._filter_tcp_connparams(**connparams) connparams.update({ 'connection_class': redis.UnixDomainSocketConnection, 'path': '/' + path}, **query) connparams.pop('socket_connect_timeout', None) connparams.pop('socket_keepalive', None) connparams.pop('socket_keepalive_options', None) connparams['password'] = password connparams.pop('host', None) connparams.pop('port', None) connparams['db'] = self._prepare_virtual_host( connparams.pop('virtual_host', None)) channel = self connection_cls = ( connparams.get('connection_class') or self.connection_class ) if async: class Connection(connection_cls): def disconnect(self): super(Connection, self).disconnect() channel._on_connection_disconnect(self) connection_cls = Connection connparams['connection_class'] = connection_cls return connparams def _create_client(self, async=False): if async: return self.Client(connection_pool=self.async_pool) return self.Client(connection_pool=self.pool) def _get_pool(self, async=False): params = self._connparams(async=async) self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db']) return redis.ConnectionPool(**params) def _get_client(self): if redis.VERSION < (2, 10, 0): raise VersionMismatch( 'Redis transport requires redis-py versions 2.10.0 or later. 
' 'You have {0.__version__}'.format(redis)) return redis.StrictRedis @contextmanager def conn_or_acquire(self, client=None): if client: yield client else: yield self._create_client() @property def pool(self): if self._pool is None: self._pool = self._get_pool() return self._pool @property def async_pool(self): if self._async_pool is None: self._async_pool = self._get_pool(async=True) return self._async_pool @cached_property def client(self): """Client used to publish messages, BRPOP etc.""" return self._create_client(async=True) @cached_property def subclient(self): """Pub/Sub connection used to consume fanout queues.""" client = self._create_client(async=True) return client.pubsub() def _update_queue_cycle(self): self._queue_cycle.update(self.active_queues) def _get_response_error(self): from redis import exceptions return exceptions.ResponseError @property def active_queues(self): """Set of queues being consumed from (excluding fanout queues).""" return {queue for queue in self._active_queues if queue not in self.active_fanout_queues} class Transport(virtual.Transport): """Redis Transport.""" Channel = Channel polling_interval = None # disable sleep between unsuccessful polls. default_port = DEFAULT_PORT driver_type = 'redis' driver_name = 'redis' implements = virtual.Transport.implements.extend( async=True, exchange_type=frozenset(['direct', 'topic', 'fanout']) ) def __init__(self, *args, **kwargs): if redis is None: raise ImportError('Missing redis library (pip install redis)') super(Transport, self).__init__(*args, **kwargs) # Get redis-py exceptions. self.connection_errors, self.channel_errors = self._get_errors() # All channels share the same poller. self.cycle = MultiChannelPoller() def driver_version(self): return redis.__version__ def register_with_event_loop(self, connection, loop): cycle = self.cycle cycle.on_poll_init(loop.poller) cycle_poll_start = cycle.on_poll_start add_reader = loop.add_reader on_readable = self.on_readable def _on_disconnect(connection): if connection._sock: loop.remove(connection._sock) cycle._on_connection_disconnect = _on_disconnect def on_poll_start(): cycle_poll_start() [add_reader(fd, on_readable, fd) for fd in cycle.fds] loop.on_tick.add(on_poll_start) loop.call_repeatedly(10, cycle.maybe_restore_messages) def on_readable(self, fileno): """Handle AIO event for one of our file descriptors.""" self.cycle.on_readable(fileno) def _get_errors(self): """Utility to import redis-py's exceptions at runtime.""" return get_redis_error_classes() class SentinelChannel(Channel): """Channel with explicit Redis Sentinel knowledge. Broker url is supposed to look like: sentinel://0.0.0.0:26379;sentinel://0.0.0.0:26380/... where each sentinel is separated by a `;`. Multiple sentinels are handled by :class:`kombu.Connection` constructor, and placed in the alternative list of servers to connect to in case of connection failure. Other arguments for the sentinel should come from the transport options (see :method:`Celery.connection` which is in charge of creating the `Connection` object). 
You must provide at least one option in Transport options: * `master_name` - name of the redis group to poll """ from_transport_options = Channel.from_transport_options + ( 'master_name', 'min_other_sentinels', 'sentinel_kwargs') connection_class = sentinel.SentinelManagedConnection if sentinel else None def _sentinel_managed_pool(self, async=False): connparams = self._connparams(async) additional_params = connparams.copy() additional_params.pop('host', None) additional_params.pop('port', None) sentinel_inst = sentinel.Sentinel( [(connparams['host'], connparams['port'])], min_other_sentinels=getattr(self, 'min_other_sentinels', 0), sentinel_kwargs=getattr(self, 'sentinel_kwargs', {}), **additional_params) master_name = getattr(self, 'master_name', None) return sentinel_inst.master_for( master_name, self.Client, ).connection_pool def _get_pool(self, async=False): return self._sentinel_managed_pool(async) class SentinelTransport(Transport): """Redis Sentinel Transport.""" default_port = 26379 Channel = SentinelChannel kombu-4.1.0/kombu/transport/mongodb.py0000644000175000017500000003256613130603207017736 0ustar omeromer00000000000000"""MongoDB transport. :copyright: (c) 2010 - 2013 by Flavio Percoco Premoli. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, unicode_literals import datetime import pymongo from pymongo import errors from pymongo import MongoClient, uri_parser from pymongo.cursor import CursorType from kombu.exceptions import VersionMismatch from kombu.five import Empty, string_t from kombu.utils.compat import _detect_environment from kombu.utils.encoding import bytes_to_str from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from . import virtual E_SERVER_VERSION = """\ Kombu requires MongoDB version 1.3+ (server is {0})\ """ E_NO_TTL_INDEXES = """\ Kombu requires MongoDB version 2.2+ (server is {0}) for TTL indexes support\ """ class BroadcastCursor(object): """Cursor for broadcast queues.""" def __init__(self, cursor): self._cursor = cursor self.purge(rewind=False) def get_size(self): return self._cursor.count() - self._offset def close(self): self._cursor.close() def purge(self, rewind=True): if rewind: self._cursor.rewind() # Fast forward the cursor past old events self._offset = self._cursor.count() self._cursor = self._cursor.skip(self._offset) def __iter__(self): return self def __next__(self): while True: try: msg = next(self._cursor) except pymongo.errors.OperationFailure as exc: # In some cases tailed cursor can become invalid # and have to be reinitalized if 'not valid at server' in str(exc): self.purge() continue raise else: break self._offset += 1 return msg next = __next__ class Channel(virtual.Channel): """MongoDB Channel.""" supports_fanout = True # Mutable container. 
Shared by all class instances _fanout_queues = {} # Options ssl = False ttl = False connect_timeout = None capped_queue_size = 100000 calc_queue_size = True default_hostname = '127.0.0.1' default_port = 27017 default_database = 'kombu_default' messages_collection = 'messages' routing_collection = 'messages.routing' broadcast_collection = 'messages.broadcast' queues_collection = 'messages.queues' from_transport_options = (virtual.Channel.from_transport_options + ( 'connect_timeout', 'ssl', 'ttl', 'capped_queue_size', 'default_hostname', 'default_port', 'default_database', 'messages_collection', 'routing_collection', 'broadcast_collection', 'queues_collection', 'calc_queue_size', )) def __init__(self, *vargs, **kwargs): super(Channel, self).__init__(*vargs, **kwargs) self._broadcast_cursors = {} # Evaluate connection self.client # AbstractChannel/Channel interface implementation def _new_queue(self, queue, **kwargs): if self.ttl: self.queues.update( {'_id': queue}, {'_id': queue, 'options': kwargs, 'expire_at': self._get_expire(kwargs, 'x-expires')}, upsert=True) def _get(self, queue): if queue in self._fanout_queues: try: msg = next(self._get_broadcast_cursor(queue)) except StopIteration: msg = None else: msg = self.messages.find_and_modify( query={'queue': queue}, sort=[('priority', pymongo.ASCENDING)], remove=True, ) if self.ttl: self._update_queues_expire(queue) if msg is None: raise Empty() return loads(bytes_to_str(msg['payload'])) def _size(self, queue): # Do not calculate actual queue size if requested # for performance considerations if not self.calc_queue_size: return super(Channel, self)._size(queue) if queue in self._fanout_queues: return self._get_broadcast_cursor(queue).get_size() return self.messages.find({'queue': queue}).count() def _put(self, queue, message, **kwargs): data = { 'payload': dumps(message), 'queue': queue, 'priority': self._get_message_priority(message, reverse=True) } if self.ttl: data['expire_at'] = self._get_expire(queue, 'x-message-ttl') self.messages.insert(data) def _put_fanout(self, exchange, message, routing_key, **kwargs): self.broadcast.insert({'payload': dumps(message), 'queue': exchange}) def _purge(self, queue): size = self._size(queue) if queue in self._fanout_queues: self._get_broadcast_cursor(queue).purge() else: self.messages.remove({'queue': queue}) return size def get_table(self, exchange): localRoutes = frozenset(self.state.exchanges[exchange]['table']) brokerRoutes = self.routing.find( {'exchange': exchange} ) return localRoutes | frozenset( (r['routing_key'], r['pattern'], r['queue']) for r in brokerRoutes ) def _queue_bind(self, exchange, routing_key, pattern, queue): if self.typeof(exchange).type == 'fanout': self._create_broadcast_cursor( exchange, routing_key, pattern, queue) self._fanout_queues[queue] = exchange lookup = { 'exchange': exchange, 'queue': queue, 'routing_key': routing_key, 'pattern': pattern, } data = lookup.copy() if self.ttl: data['expire_at'] = self._get_expire(queue, 'x-expires') self.routing.update(lookup, data, upsert=True) def queue_delete(self, queue, **kwargs): self.routing.remove({'queue': queue}) if self.ttl: self.queues.remove({'_id': queue}) super(Channel, self).queue_delete(queue, **kwargs) if queue in self._fanout_queues: try: cursor = self._broadcast_cursors.pop(queue) except KeyError: pass else: cursor.close() self._fanout_queues.pop(queue) # Implementation details def _parse_uri(self, scheme='mongodb://'): # See mongodb uri documentation: # https://docs.mongodb.org/manual/reference/connection-string/ 
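        # The steps below normalize the broker URL into a form pymongo's
        # uri_parser accepts: prepend the scheme if missing, fall back to
        # the default hostname, and splice userid/password back in as URI
        # credentials.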
client = self.connection.client hostname = client.hostname if not hostname.startswith(scheme): hostname = scheme + hostname if not hostname[len(scheme):]: hostname += self.default_hostname if client.userid and '@' not in hostname: head, tail = hostname.split('://') credentials = client.userid if client.password: credentials += ':' + client.password hostname = head + '://' + credentials + '@' + tail port = client.port if client.port else self.default_port parsed = uri_parser.parse_uri(hostname, port) dbname = parsed['database'] or client.virtual_host if dbname in ('/', None): dbname = self.default_database options = { 'auto_start_request': True, 'ssl': self.ssl, 'connectTimeoutMS': (int(self.connect_timeout * 1000) if self.connect_timeout else None), } options.update(parsed['options']) options = self._prepare_client_options(options) return hostname, dbname, options def _prepare_client_options(self, options): if pymongo.version_tuple >= (3,): options.pop('auto_start_request', None) if isinstance(options.get('readpreference'), int): modes = pymongo.read_preferences._MONGOS_MODES options['readpreference'] = modes[options['readpreference']] return options def _open(self, scheme='mongodb://'): hostname, dbname, conf = self._parse_uri(scheme=scheme) conf['host'] = hostname env = _detect_environment() if env == 'gevent': from gevent import monkey monkey.patch_all() elif env == 'eventlet': from eventlet import monkey_patch monkey_patch() mongoconn = MongoClient(**conf) database = mongoconn[dbname] version_str = mongoconn.server_info()['version'] version = tuple(map(int, version_str.split('.'))) if version < (1, 3): raise VersionMismatch(E_SERVER_VERSION.format(version_str)) elif self.ttl and version < (2, 2): raise VersionMismatch(E_NO_TTL_INDEXES.format(version_str)) return database def _create_broadcast(self, database): """Create capped collection for broadcast messages.""" if self.broadcast_collection in database.collection_names(): return database.create_collection(self.broadcast_collection, size=self.capped_queue_size, capped=True) def _ensure_indexes(self, database): """Ensure indexes on collections.""" messages = database[self.messages_collection] messages.ensure_index( [('queue', 1), ('priority', 1), ('_id', 1)], background=True, ) database[self.broadcast_collection].ensure_index([('queue', 1)]) routing = database[self.routing_collection] routing.ensure_index([('queue', 1), ('exchange', 1)]) if self.ttl: messages.ensure_index([('expire_at', 1)], expireAfterSeconds=0) routing.ensure_index([('expire_at', 1)], expireAfterSeconds=0) database[self.queues_collection].ensure_index( [('expire_at', 1)], expireAfterSeconds=0) def _create_client(self): """Actually creates the connection.""" database = self._open() self._create_broadcast(database) self._ensure_indexes(database) return database @cached_property def client(self): return self._create_client() @cached_property def messages(self): return self.client[self.messages_collection] @cached_property def routing(self): return self.client[self.routing_collection] @cached_property def broadcast(self): return self.client[self.broadcast_collection] @cached_property def queues(self): return self.client[self.queues_collection] def _get_broadcast_cursor(self, queue): try: return self._broadcast_cursors[queue] except KeyError: # Cursor may be absent when Channel created more than once. # _fanout_queues is a class-level mutable attribute so it's # shared over all Channel instances.
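            # Recreate the tailable cursor on demand; routing_key and
            # pattern are not needed for broadcast, hence None below.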
return self._create_broadcast_cursor( self._fanout_queues[queue], None, None, queue, ) def _create_broadcast_cursor(self, exchange, routing_key, pattern, queue): if pymongo.version_tuple >= (3, ): query = dict( filter={'queue': exchange}, cursor_type=CursorType.TAILABLE ) else: query = dict( query={'queue': exchange}, tailable=True ) cursor = self.broadcast.find(**query) ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor) return ret def _get_expire(self, queue, argument): """Get expiration header named `argument` of queue definition. Note: `queue` must be either queue name or options itself. """ if isinstance(queue, string_t): doc = self.queues.find_one({'_id': queue}) if not doc: return data = doc['options'] else: data = queue try: value = data['arguments'][argument] except (KeyError, TypeError): return return self.get_now() + datetime.timedelta(milliseconds=value) def _update_queues_expire(self, queue): """Update expiration field on queues documents.""" expire_at = self._get_expire(queue, 'x-expires') if not expire_at: return self.routing.update( {'queue': queue}, {'$set': {'expire_at': expire_at}}, multiple=True) self.queues.update( {'_id': queue}, {'$set': {'expire_at': expire_at}}, multiple=True) def get_now(self): """Return current time in UTC.""" return datetime.datetime.utcnow() class Transport(virtual.Transport): """MongoDB Transport.""" Channel = Channel can_parse_url = True polling_interval = 1 default_port = Channel.default_port connection_errors = ( virtual.Transport.connection_errors + (errors.ConnectionFailure,) ) channel_errors = ( virtual.Transport.channel_errors + ( errors.ConnectionFailure, errors.OperationFailure) ) driver_type = 'mongodb' driver_name = 'pymongo' implements = virtual.Transport.implements.extend( exchange_type=frozenset(['direct', 'topic', 'fanout']), ) def driver_version(self): return pymongo.version kombu-4.1.0/kombu/transport/consul.py0000644000175000017500000002154313130603207017605 0ustar omeromer00000000000000"""Consul Transport. It uses Consul.io's Key/Value store to transport messages in Queues It uses python-consul for talking to Consul's HTTP API """ from __future__ import absolute_import, unicode_literals import uuid import socket from collections import defaultdict from contextlib import contextmanager from kombu.exceptions import ChannelError from kombu.five import Empty, monotonic from kombu.log import get_logger from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from . 
import virtual try: import consul except ImportError: consul = None logger = get_logger('kombu.transport.consul') DEFAULT_PORT = 8500 DEFAULT_HOST = 'localhost' class LockError(Exception): """An error occurred while trying to acquire the lock.""" class Channel(virtual.Channel): """Consul Channel class which talks to the Consul Key/Value store.""" prefix = 'kombu' index = None timeout = '10s' session_ttl = 30 def __init__(self, *args, **kwargs): if consul is None: raise ImportError('Missing python-consul library') super(Channel, self).__init__(*args, **kwargs) port = self.connection.client.port or self.connection.default_port host = self.connection.client.hostname or DEFAULT_HOST logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout) self.queues = defaultdict(dict) self.client = consul.Consul(host=host, port=int(port)) def _lock_key(self, queue): return '{0}/{1}.lock'.format(self.prefix, queue) def _key_prefix(self, queue): return '{0}/{1}'.format(self.prefix, queue) def _get_or_create_session(self, queue): """Get or create consul session. Try to renew the session if it exists, otherwise create a new session in Consul. This session is used to acquire a lock inside Consul so that we achieve read-consistency between the nodes. Arguments: queue (str): The name of the Queue. Returns: str: The ID of the session. """ try: session_id = self.queues[queue]['session_id'] except KeyError: session_id = None return (self._renew_existing_session(session_id) if session_id is not None else self._create_new_session()) def _renew_existing_session(self, session_id): logger.debug('Trying to renew existing session %s', session_id) session = self.client.session.renew(session_id=session_id) return session.get('ID') def _create_new_session(self): logger.debug('Creating session %s with TTL %s', self.lock_name, self.session_ttl) session_id = self.client.session.create( name=self.lock_name, ttl=self.session_ttl) logger.debug('Created session %s with id %s', self.lock_name, session_id) return session_id @contextmanager def _queue_lock(self, queue, raising=LockError): """Try to acquire a lock on the Queue. It does so by creating an object called 'lock' which is locked by the current session. This way other nodes are not able to write to the lock object which means that they have to wait before the lock is released. Arguments: queue (str): The name of the Queue. raising (Exception): Set custom lock error class. Raises: LockError: if the lock cannot be acquired. Returns: bool: success? """ self._acquire_lock(queue, raising=raising) try: yield finally: self._release_lock(queue) def _acquire_lock(self, queue, raising=LockError): session_id = self._get_or_create_session(queue) lock_key = self._lock_key(queue) logger.debug('Trying to create lock object %s with session %s', lock_key, session_id) if self.client.kv.put(key=lock_key, acquire=session_id, value=self.lock_name): self.queues[queue]['session_id'] = session_id return logger.info('Could not acquire lock on key %s', lock_key) raise raising() def _release_lock(self, queue): """Try to release a lock. It does so by simply removing the lock key in Consul. Arguments: queue (str): The name of the queue we want to release the lock from. """ logger.debug('Removing lock key %s', self._lock_key(queue)) self.client.kv.delete(key=self._lock_key(queue)) def _destroy_session(self, queue): """Destroy a previously created Consul session. Will release all locks it still might hold. Arguments: queue (str): The name of the Queue.
""" logger.debug('Destroying session %s', self.queues[queue]['session_id']) self.client.session.destroy(self.queues[queue]['session_id']) def _new_queue(self, queue, **_): self.queues[queue] = {'session_id': None} return self.client.kv.put(key=self._key_prefix(queue), value=None) def _delete(self, queue, *args, **_): self._destroy_session(queue) self.queues.pop(queue, None) self._purge(queue) def _put(self, queue, payload, **_): """Put `message` onto `queue`. This simply writes a key to the K/V store of Consul """ key = '{0}/msg/{1}_{2}'.format( self._key_prefix(queue), int(round(monotonic() * 1000)), uuid.uuid4(), ) if not self.client.kv.put(key=key, value=dumps(payload), cas=0): raise ChannelError('Cannot add key {0!r} to consul'.format(key)) def _get(self, queue, timeout=None): """Get the first available message from the queue. Before it does so it acquires a lock on the Key/Value store so only one node reads at the same time. This is for read consistency """ with self._queue_lock(queue, raising=Empty): key = '{0}/msg/'.format(self._key_prefix(queue)) logger.debug('Fetching key %s with index %s', key, self.index) self.index, data = self.client.kv.get( key=key, recurse=True, index=self.index, wait=self.timeout, ) try: if data is None: raise Empty() logger.debug('Removing key %s with modifyindex %s', data[0]['Key'], data[0]['ModifyIndex']) self.client.kv.delete(key=data[0]['Key'], cas=data[0]['ModifyIndex']) return loads(data[0]['Value']) except TypeError: pass raise Empty() def _purge(self, queue): self._destroy_session(queue) return self.client.kv.delete( key='{0}/msg/'.format(self._key_prefix(queue)), recurse=True, ) def _size(self, queue): size = 0 try: key = '{0}/msg/'.format(self._key_prefix(queue)) logger.debug('Fetching key recursively %s with index %s', key, self.index) self.index, data = self.client.kv.get( key=key, recurse=True, index=self.index, wait=self.timeout, ) size = len(data) except TypeError: pass logger.debug('Found %s keys under %s with index %s', size, key, self.index) return size @cached_property def lock_name(self): return '{0}'.format(socket.gethostname()) class Transport(virtual.Transport): """Consul K/V storage Transport for Kombu.""" Channel = Channel default_port = DEFAULT_PORT driver_type = 'consul' driver_name = 'consul' def __init__(self, *args, **kwargs): if consul is None: raise ImportError('Missing python-consul library') super(Transport, self).__init__(*args, **kwargs) self.connection_errors = ( virtual.Transport.connection_errors + ( consul.ConsulException, consul.base.ConsulException ) ) self.channel_errors = ( virtual.Transport.channel_errors + ( consul.ConsulException, consul.base.ConsulException ) ) def verify_connection(self, connection): port = connection.client.port or self.default_port host = connection.client.hostname or DEFAULT_HOST logger.debug('Verify Consul connection to %s:%s', host, port) try: client = consul.Consul(host=host, port=int(port)) client.agent.self() return True except ValueError: pass return False def driver_version(self): return consul.__version__ kombu-4.1.0/kombu/transport/SQS.py0000644000175000017500000004520313134153527016760 0ustar omeromer00000000000000"""Amazon SQS Transport. Amazon SQS transport module for Kombu. This package implements an AMQP-like interface on top of Amazons SQS service, with the goal of being optimized for high performance and reliability. The default settings for this module are focused now on high performance in task queue situations where tasks are small, idempotent and run very fast. 
SQS Features supported by this transport: Long Polling: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ sqs-long-polling.html Long polling is enabled by setting the `wait_time_seconds` transport option to a number > 0; Amazon supports waits of up to 20 seconds. Long polling is enabled by default with a wait of 10 seconds. Batch API Actions: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ sqs-batch-api.html The default behavior of the SQS Channel.drain_events() method is to request up to 'prefetch_count' messages on every request to SQS. These messages are stored locally in a deque object and passed back to the Transport until the deque is empty, before triggering a new API call to Amazon. This behavior dramatically speeds up the rate at which you can pull tasks from SQS when you have short-running tasks (or a large number of workers). When a Celery worker has multiple queues to monitor, it will pull down up to 'prefetch_count' messages from queueA and work on them all before moving on to queueB. If queueB is empty, it will wait until 'polling_interval' expires before moving back and checking on queueA. """ from __future__ import absolute_import, unicode_literals import base64 import socket import string import uuid from vine import transform, ensure_promise, promise from kombu.async import get_event_loop from kombu.async.aws.ext import boto3, exceptions from kombu.async.aws.sqs.connection import AsyncSQSConnection from kombu.async.aws.sqs.message import AsyncMessage from kombu.five import Empty, range, string_t, text_t from kombu.log import get_logger from kombu.utils import scheduling from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from . import virtual logger = get_logger(__name__) # dots are replaced by dash, all other punctuation # replaced by underscore. CHARS_REPLACE_TABLE = { ord(c): 0x5f for c in string.punctuation if c not in '-_.' } CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-' #: SQS bulk get supports a maximum of 10 messages at a time. SQS_MAX_MESSAGES = 10 def maybe_int(x): """Try to convert `x` to int, or return `x` unchanged if that fails.""" try: return int(x) except ValueError: return x class Channel(virtual.Channel): """SQS Channel.""" default_region = 'us-east-1' default_visibility_timeout = 1800 # 30 minutes. default_wait_time_seconds = 10 # up to 20 seconds max domain_format = 'kombu%(vhost)s' _asynsqs = None _sqs = None _queue_cache = {} _noack_queues = set() def __init__(self, *args, **kwargs): if boto3 is None: raise ImportError('boto3 is not installed') super(Channel, self).__init__(*args, **kwargs) # SQS blows up if you try to create a new queue when one already # exists but with a different visibility_timeout. This prepopulates # the queue_cache to protect us from recreating # queues that are known to already exist. 
self._update_queue_cache(self.queue_name_prefix) self.hub = kwargs.get('hub') or get_event_loop() def _update_queue_cache(self, queue_name_prefix): resp = self.sqs.list_queues(QueueNamePrefix=queue_name_prefix) for url in resp.get('QueueUrls', []): queue_name = url.split('/')[-1] self._queue_cache[queue_name] = url def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) if self.hub: self._loop1(queue) return super(Channel, self).basic_consume( queue, no_ack, *args, **kwargs ) def basic_cancel(self, consumer_tag): if consumer_tag in self._consumers: queue = self._tag_to_queue[consumer_tag] self._noack_queues.discard(queue) return super(Channel, self).basic_cancel(consumer_tag) def drain_events(self, timeout=None, callback=None, **kwargs): """Return a single payload message from one of our queues. Raises: Queue.Empty: if no messages available. """ # If we're not allowed to consume or have no consumers, raise Empty if not self._consumers or not self.qos.can_consume(): raise Empty() # At this point, go and get more messages from SQS self._poll(self.cycle, callback, timeout=timeout) def _reset_cycle(self): """Reset the consume cycle. Sets the cycle to a FairCycle object that points to our _get_bulk() method rather than the standard _get() method. This allows multiple messages to be returned at once from SQS (based on the prefetch limit). """ self._cycle = scheduling.FairCycle( self._get_bulk, self._active_queues, Empty, ) def entity_name(self, name, table=CHARS_REPLACE_TABLE): """Format AMQP queue name into a legal SQS queue name.""" if name.endswith('.fifo'): partial = name[:-len('.fifo')] partial = text_t(safe_str(partial)).translate(table) return partial + '.fifo' else: return text_t(safe_str(name)).translate(table) def canonical_queue_name(self, queue_name): return self.entity_name(self.queue_name_prefix + queue_name) def _new_queue(self, queue, **kwargs): """Ensure a queue with given name exists in SQS.""" if not isinstance(queue, string_t): return queue # Translate to SQS name for consistency with initial # _queue_cache population. queue = self.canonical_queue_name(queue) # The SQS ListQueues method only returns 1000 queues. When you have # so many queues, it's possible that the queue you are looking for is # not cached. In this case, we could update the cache with the exact # queue name first. 
if queue not in self._queue_cache: self._update_queue_cache(queue) try: return self._queue_cache[queue] except KeyError: attributes = {'VisibilityTimeout': str(self.visibility_timeout)} if queue.endswith('.fifo'): attributes['FifoQueue'] = 'true' resp = self._queue_cache[queue] = self.sqs.create_queue( QueueName=queue, Attributes=attributes) self._queue_cache[queue] = resp['QueueUrl'] return resp['QueueUrl'] def _delete(self, queue, *args, **kwargs): """Delete queue by name.""" super(Channel, self)._delete(queue) self._queue_cache.pop(queue, None) def _put(self, queue, message, **kwargs): """Put message onto queue.""" q_url = self._new_queue(queue) kwargs = {'QueueUrl': q_url, 'MessageBody': AsyncMessage().encode(dumps(message))} if queue.endswith('.fifo'): if 'MessageGroupId' in message['properties']: kwargs['MessageGroupId'] = \ message['properties']['MessageGroupId'] else: kwargs['MessageGroupId'] = 'default' if 'MessageDeduplicationId' in message['properties']: kwargs['MessageDeduplicationId'] = \ message['properties']['MessageDeduplicationId'] else: kwargs['MessageDeduplicationId'] = str(uuid.uuid4()) self.sqs.send_message(**kwargs) def _message_to_python(self, message, queue_name, queue): body = base64.b64decode(message['Body'].encode()) payload = loads(bytes_to_str(body)) if queue_name in self._noack_queues: queue = self._new_queue(queue_name) self.asynsqs.delete_message(queue, message['ReceiptHandle']) else: try: properties = payload['properties'] delivery_info = payload['properties']['delivery_info'] except KeyError: # json message not sent by kombu? delivery_info = {} properties = {'delivery_info': delivery_info} payload.update({ 'body': bytes_to_str(body), 'properties': properties, }) # set delivery tag to SQS receipt handle delivery_info.update({ 'sqs_message': message, 'sqs_queue': queue, }) properties['delivery_tag'] = message['ReceiptHandle'] return payload def _messages_to_python(self, messages, queue): """Convert a list of SQS Message objects into Payloads. This method handles converting SQS Message objects into Payloads, and appropriately updating the queue depending on the 'ack' settings for that queue. Arguments: messages (SQSMessage): A list of SQS Message objects. queue (str): Name representing the queue they came from. Returns: List: A list of Payload objects """ q = self._new_queue(queue) return [self._message_to_python(m, queue, q) for m in messages] def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES, callback=None): """Try to retrieve multiple messages off ``queue``. Where :meth:`_get` returns a single Payload object, this method returns a list of Payload objects. The number of objects returned is determined by the total number of messages available in the queue and the number of messages the QoS object allows (based on the prefetch_count). Note: Ignores QoS limits so caller is responsible for checking that we are allowed to consume at least one message from the queue. get_bulk will then ask QoS for an estimate of the number of extra messages that we can consume. Arguments: queue (str): The queue name to pull from. Returns: List[Message] """ # drain_events calls `can_consume` first, consuming # a token, so we know that we are allowed to consume at least # one message. 
# Note: ignoring max_messages for SQS with boto3 max_count = self._get_message_estimate() if max_count: q_url = self._new_queue(queue) resp = self.sqs.receive_message( QueueUrl=q_url, MaxNumberOfMessages=max_count, WaitTimeSeconds=self.wait_time_seconds) if resp.get('Messages'): for m in resp['Messages']: m['Body'] = AsyncMessage(body=m['Body']).decode() for msg in self._messages_to_python(resp['Messages'], queue): self.connection._deliver(msg, queue) return raise Empty() def _get(self, queue): """Try to retrieve a single message off ``queue``.""" q_url = self._new_queue(queue) resp = self.sqs.receive_message( QueueUrl=q_url, MaxNumberOfMessages=1, WaitTimeSeconds=self.wait_time_seconds) if resp.get('Messages'): body = AsyncMessage(body=resp['Messages'][0]['Body']).decode() resp['Messages'][0]['Body'] = body return self._messages_to_python(resp['Messages'], queue)[0] raise Empty() def _loop1(self, queue, _=None): self.hub.call_soon(self._schedule_queue, queue) def _schedule_queue(self, queue): if queue in self._active_queues: if self.qos.can_consume(): self._get_bulk_async( queue, callback=promise(self._loop1, (queue,)), ) else: self._loop1(queue) def _get_message_estimate(self, max_if_unlimited=SQS_MAX_MESSAGES): maxcount = self.qos.can_consume_max_estimate() return min( max_if_unlimited if maxcount is None else max(maxcount, 1), max_if_unlimited, ) def _get_bulk_async(self, queue, max_if_unlimited=SQS_MAX_MESSAGES, callback=None): maxcount = self._get_message_estimate() if maxcount: return self._get_async(queue, maxcount, callback=callback) # Not allowed to consume, make sure to notify callback.. callback = ensure_promise(callback) callback([]) return callback def _get_async(self, queue, count=1, callback=None): q = self._new_queue(queue) qname = self.canonical_queue_name(queue) return self._get_from_sqs( qname, count=count, connection=self.asynsqs, callback=transform(self._on_messages_ready, callback, q, queue), ) def _on_messages_ready(self, queue, qname, messages): if 'Messages' in messages and messages['Messages']: callbacks = self.connection._callbacks for msg in messages['Messages']: msg_parsed = self._message_to_python(msg, qname, queue) callbacks[qname](msg_parsed) def _get_from_sqs(self, queue, count=1, connection=None, callback=None): """Retrieve and handle messages from SQS. Uses long polling and returns :class:`~vine.promises.promise`. """ connection = connection if connection is not None else queue.connection return connection.receive_message( queue, number_messages=count, wait_time_seconds=self.wait_time_seconds, callback=callback, ) def _restore(self, message, unwanted_delivery_info=('sqs_message', 'sqs_queue')): for unwanted_key in unwanted_delivery_info: # Remove objects that aren't JSON serializable (Issue #1108). 
message.delivery_info.pop(unwanted_key, None) return super(Channel, self)._restore(message) def basic_ack(self, delivery_tag, multiple=False): try: message = self.qos.get(delivery_tag).delivery_info sqs_message = message['sqs_message'] except KeyError: pass else: self.asynsqs.delete_message(message['sqs_queue'], sqs_message['ReceiptHandle']) super(Channel, self).basic_ack(delivery_tag) def _size(self, queue): """Return the number of messages in a queue.""" url = self._new_queue(queue) resp = self.sqs.get_queue_attributes( QueueUrl=url, AttributeNames=['ApproximateNumberOfMessages']) return int(resp['Attributes']['ApproximateNumberOfMessages']) def _purge(self, queue): """Delete all current messages in a queue.""" q = self._new_queue(queue) # SQS is slow at registering messages, so run for a few # iterations to ensure messages are detected and deleted. size = 0 for i in range(10): size += int(self._size(queue)) if not size: break self.sqs.purge_queue(QueueUrl=q) return size def close(self): super(Channel, self).close() # if self._asynsqs: # try: # self.asynsqs.close() # except AttributeError as exc: # FIXME ??? # if "can't set attribute" not in str(exc): # raise @property def sqs(self): if self._sqs is None: session = boto3.session.Session( region_name=self.region, aws_access_key_id=self.conninfo.userid, aws_secret_access_key=self.conninfo.password, ) is_secure = self.is_secure if self.is_secure is not None else True client_kwargs = dict( use_ssl=is_secure ) if self.endpoint_url is not None: client_kwargs['endpoint_url'] = self.endpoint_url self._sqs = session.client('sqs', **client_kwargs) return self._sqs @property def asynsqs(self): if self._asynsqs is None: self._asynsqs = AsyncSQSConnection( sqs_connection=self.sqs, region=self.region ) return self._asynsqs @property def conninfo(self): return self.connection.client @property def transport_options(self): return self.connection.client.transport_options @cached_property def visibility_timeout(self): return (self.transport_options.get('visibility_timeout') or self.default_visibility_timeout) @cached_property def queue_name_prefix(self): return self.transport_options.get('queue_name_prefix', '') @cached_property def supports_fanout(self): return False @cached_property def region(self): return self.transport_options.get('region') or self.default_region @cached_property def regioninfo(self): return self.transport_options.get('regioninfo') @cached_property def is_secure(self): return self.transport_options.get('is_secure') @cached_property def port(self): return self.transport_options.get('port') @cached_property def endpoint_url(self): if self.conninfo.hostname is not None: scheme = 'https' if self.is_secure else 'http' if self.conninfo.port is not None: port = ':{}'.format(self.conninfo.port) else: port = '' return '{}://{}{}'.format( scheme, self.conninfo.hostname, port ) @cached_property def wait_time_seconds(self): return self.transport_options.get('wait_time_seconds', self.default_wait_time_seconds) class Transport(virtual.Transport): """SQS Transport.""" Channel = Channel polling_interval = 1 wait_time_seconds = 0 default_port = None connection_errors = ( virtual.Transport.connection_errors + (exceptions.BotoCoreError, socket.error) ) channel_errors = ( virtual.Transport.channel_errors + (exceptions.BotoCoreError,) ) driver_type = 'sqs' driver_name = 'sqs' implements = virtual.Transport.implements.extend( async=True, exchange_type=frozenset(['direct']), ) @property def default_connection_params(self): return {'port': self.default_port} 
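
# The block below is an illustrative usage sketch, not part of the original
# module. Extra keyword arguments to ``Producer.publish()`` become message
# properties; for queue names ending in '.fifo', ``Channel._put()`` above
# reads the optional 'MessageGroupId' and 'MessageDeduplicationId'
# properties, falling back to 'default' and a random UUID respectively.
# The queue name and property values here are assumptions for the example.
if __name__ == '__main__':  # pragma: no cover
    from kombu import Connection

    with Connection('sqs://') as connection:
        producer = connection.Producer()
        producer.publish(
            {'order': 42},
            # Publishing to the default exchange routes directly to the
            # queue named by routing_key.
            routing_key='orders.fifo',
            MessageGroupId='orders',
            MessageDeduplicationId='order-42',
        )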
kombu-4.1.0/kombu/transport/librabbitmq.py0000644000175000017500000001341313130603207020567 0ustar omeromer00000000000000"""`librabbitmq`_ transport. .. _`librabbitmq`: https://pypi.python.org/librabbitmq/ """ from __future__ import absolute_import, unicode_literals import os import socket import warnings import librabbitmq as amqp from librabbitmq import ChannelError, ConnectionError from kombu.five import items, values from kombu.utils.amq_manager import get_manager from kombu.utils.text import version_string_as_tuple from . import base from .base import to_rabbitmq_queue_arguments W_VERSION = """ librabbitmq version too old to detect RabbitMQ version information so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3 """ DEFAULT_PORT = 5672 DEFAULT_SSL_PORT = 5671 NO_SSL_ERROR = """\ ssl not supported by librabbitmq, please use pyamqp:// or stunnel\ """ class Message(base.Message): """AMQP Message (librabbitmq).""" def __init__(self, channel, props, info, body): super(Message, self).__init__( channel=channel, body=body, delivery_info=info, properties=props, delivery_tag=info.get('delivery_tag'), content_type=props.get('content_type'), content_encoding=props.get('content_encoding'), headers=props.get('headers')) class Channel(amqp.Channel, base.StdChannel): """AMQP Channel (librabbitmq).""" Message = Message def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Encapsulate data into a AMQP message.""" properties = properties if properties is not None else {} properties.update({'content_type': content_type, 'content_encoding': content_encoding, 'headers': headers, 'priority': priority}) return body, properties def prepare_queue_arguments(self, arguments, **kwargs): arguments = to_rabbitmq_queue_arguments(arguments, **kwargs) return {k.encode('utf8'): v for k, v in items(arguments)} class Connection(amqp.Connection): """AMQP Connection (librabbitmq).""" Channel = Channel Message = Message class Transport(base.Transport): """AMQP Transport (librabbitmq).""" Connection = Connection default_port = DEFAULT_PORT default_ssl_port = DEFAULT_SSL_PORT connection_errors = ( base.Transport.connection_errors + ( ConnectionError, socket.error, IOError, OSError) ) channel_errors = ( base.Transport.channel_errors + (ChannelError,) ) driver_type = 'amqp' driver_name = 'librabbitmq' implements = base.Transport.implements.extend( async=True, heartbeats=False, ) def __init__(self, client, **kwargs): self.client = client self.default_port = kwargs.get('default_port') or self.default_port self.default_ssl_port = (kwargs.get('default_ssl_port') or self.default_ssl_port) self.__reader = None def driver_version(self): return amqp.__version__ def create_channel(self, connection): return connection.channel() def drain_events(self, connection, **kwargs): return connection.drain_events(**kwargs) def establish_connection(self): """Establish connection to the AMQP broker.""" conninfo = self.client for name, default_value in items(self.default_connection_params): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.ssl: raise NotImplementedError(NO_SSL_ERROR) opts = dict({ 'host': conninfo.host, 'userid': conninfo.userid, 'password': conninfo.password, 'virtual_host': conninfo.virtual_host, 'login_method': conninfo.login_method, 'insist': conninfo.insist, 'ssl': conninfo.ssl, 'connect_timeout': conninfo.connect_timeout, }, **conninfo.transport_options or {}) conn = self.Connection(**opts) conn.client = 
self.client self.client.drain_events = conn.drain_events return conn def close_connection(self, connection): """Close the AMQP broker connection.""" self.client.drain_events = None connection.close() def _collect(self, connection): if connection is not None: for channel in values(connection.channels): channel.connection = None try: os.close(connection.fileno()) except OSError: pass connection.channels.clear() connection.callbacks.clear() self.client.drain_events = None self.client = None def verify_connection(self, connection): return connection.connected def register_with_event_loop(self, connection, loop): loop.add_reader( connection.fileno(), self.on_readable, connection, loop, ) def get_manager(self, *args, **kwargs): return get_manager(self.client, *args, **kwargs) def qos_semantics_matches_spec(self, connection): try: props = connection.server_properties except AttributeError: warnings.warn(UserWarning(W_VERSION)) else: if props.get('product') == 'RabbitMQ': return version_string_as_tuple(props['version']) < (3, 3) return True @property def default_connection_params(self): return { 'userid': 'guest', 'password': 'guest', 'port': (self.default_ssl_port if self.client.ssl else self.default_port), 'hostname': 'localhost', 'login_method': 'AMQPLAIN', } kombu-4.1.0/kombu/transport/qpid.py0000644000175000017500000021404513130603207017240 0ustar omeromer00000000000000"""Qpid Transport. `Qpid`_ transport using `qpid-python`_ as the client and `qpid-tools`_ for broker management. To use this transport you must install the necessary dependencies. These dependencies are available via PyPI and can be installed using the pip command: .. code-block:: console $ pip install kombu[qpid] or to install the requirements manually: .. code-block:: console $ pip install qpid-tools qpid-python .. admonition:: Python 3 and PyPy Limitations The Qpid transport does not support Python 3 or PyPy environments due to underlying dependencies not being compatible. This version is tested and works with Python 2.7. .. _`Qpid`: https://qpid.apache.org/ .. _`qpid-python`: https://pypi.python.org/pypi/qpid-python/ .. _`qpid-tools`: https://pypi.python.org/pypi/qpid-tools/ Authentication ============== This transport supports SASL authentication with the Qpid broker. Normally, SASL mechanisms are negotiated from a client list and a server list of possible mechanisms, but in practice, different SASL client libraries give different behaviors. These different behaviors cause the expected SASL mechanism to not be selected in many cases. As such, this transport restricts the mechanism types based on Kombu's configuration according to the following table. +------------------------------------+--------------------+ | **Broker String** | **SASL Mechanism** | +------------------------------------+--------------------+ | qpid://hostname/ | ANONYMOUS | +------------------------------------+--------------------+ | qpid://username:password@hostname/ | PLAIN | +------------------------------------+--------------------+ | see instructions below | EXTERNAL | +------------------------------------+--------------------+ The user can override the above SASL selection behaviors and specify the SASL string using the :attr:`~kombu.Connection.login_method` argument to the :class:`~kombu.Connection` object. The string can be a single SASL mechanism or a space separated list of SASL mechanisms. If you are using Celery with Kombu, this can be accomplished by setting the *BROKER_LOGIN_METHOD* Celery option. .. 
note:: While using SSL, Qpid users may want to override the SASL mechanism to use *EXTERNAL*. In that case, Qpid requires a username to be presented that matches the *CN* of the SSL client certificate. Ensure that the broker string contains the corresponding username. For example, if the client certificate has *CN=asdf* and the client connects to *example.com* on port 5671, the broker string should be: **qpid://asdf@example.com:5671/** Transport Options ================= The :attr:`~kombu.Connection.transport_options` argument to the :class:`~kombu.Connection` object are passed directly to the :class:`qpid.messaging.endpoints.Connection` as keyword arguments. These options override and replace any other default or specified values. If using Celery, this can be accomplished by setting the *BROKER_TRANSPORT_OPTIONS* Celery option. """ from __future__ import absolute_import, unicode_literals from collections import OrderedDict import os import select import socket import ssl import sys import uuid from gettext import gettext as _ import amqp.protocol try: import fcntl except ImportError: fcntl = None # noqa try: import qpidtoollibs except ImportError: # pragma: no cover qpidtoollibs = None # noqa try: from qpid.messaging.exceptions import ConnectionError, NotFound from qpid.messaging.exceptions import Empty as QpidEmpty from qpid.messaging.exceptions import SessionClosed except ImportError: # pragma: no cover ConnectionError = None NotFound = None QpidEmpty = None SessionClosed = None try: import qpid except ImportError: # pragma: no cover qpid = None from kombu.five import Empty, items, monotonic from kombu.log import get_logger from kombu.transport.virtual import Base64, Message from kombu.transport import base logger = get_logger(__name__) OBJECT_ALREADY_EXISTS_STRING = 'object already exists' VERSION = (1, 0, 0) __version__ = '.'.join(map(str, VERSION)) PY3 = sys.version_info[0] == 3 def dependency_is_none(dependency): """Return True if the dependency is None, otherwise False. This is done using a function so that tests can mock this behavior easily. :param dependency: The module to check if it is None :return: True if dependency is None otherwise False. """ return dependency is None class AuthenticationFailure(Exception): """Cannot authenticate with Qpid.""" class QoS(object): """A helper object for message prefetch and ACKing purposes. :keyword prefetch_count: Initial prefetch count, hard set to 1. :type prefetch_count: int NOTE: prefetch_count is currently hard set to 1, and needs to be improved This object is instantiated 1-for-1 with a :class:`~.kombu.transport.qpid.Channel` instance. QoS allows ``prefetch_count`` to be set to the number of outstanding messages the corresponding :class:`~kombu.transport.qpid.Channel` should be allowed to prefetch. Setting ``prefetch_count`` to 0 disables prefetch limits, and the object can hold an arbitrary number of messages. Messages are added using :meth:`append`, which are held until they are ACKed asynchronously through a call to :meth:`ack`. Messages that are received, but not ACKed will not be delivered by the broker to another consumer until an ACK is received, or the session is closed. Messages are referred to using delivery_tag, which are unique per :class:`Channel`. Delivery tags are managed outside of this object and are passed in with a message to :meth:`append`. Un-ACKed messages can be looked up from QoS using :meth:`get` and can be rejected and forgotten using :meth:`reject`. 
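A short illustrative sketch of that lifecycle (``session`` stands for an
already-created :class:`qpid.messaging.endpoints.Session`; the other
names are examples, not part of this module):

.. code-block:: python

    qos = QoS(session)
    qos.append(qpid_message, delivery_tag)  # hold an un-ACKed message
    if qos.can_consume():
        pass  # prefetch limit not reached; safe to fetch another message
    qos.ack(delivery_tag)
    # ...or instead give it back to the broker:
    # qos.reject(delivery_tag, requeue=True)
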
""" def __init__(self, session, prefetch_count=1): self.session = session self.prefetch_count = 1 self._not_yet_acked = OrderedDict() def can_consume(self): """Return True if the :class:`Channel` can consume more messages. Used to ensure the client adheres to currently active prefetch limits. :returns: True, if this QoS object can accept more messages without violating the prefetch_count. If prefetch_count is 0, can_consume will always return True. :rtype: bool """ return ( not self.prefetch_count or len(self._not_yet_acked) < self.prefetch_count ) def can_consume_max_estimate(self): """Return the remaining message capacity. Returns an estimated number of outstanding messages that a :class:`kombu.transport.qpid.Channel` can accept without exceeding ``prefetch_count``. If ``prefetch_count`` is 0, then this method returns 1. :returns: The number of estimated messages that can be fetched without violating the prefetch_count. :rtype: int """ return 1 if not self.prefetch_count else ( self.prefetch_count - len(self._not_yet_acked) ) def append(self, message, delivery_tag): """Append message to the list of un-ACKed messages. Add a message, referenced by the delivery_tag, for ACKing, rejecting, or getting later. Messages are saved into an :class:`collections.OrderedDict` by delivery_tag. :param message: A received message that has not yet been ACKed. :type message: qpid.messaging.Message :param delivery_tag: A UUID to refer to this message by upon receipt. :type delivery_tag: uuid.UUID """ self._not_yet_acked[delivery_tag] = message def get(self, delivery_tag): """Get an un-ACKed message by delivery_tag. If called with an invalid delivery_tag a :exc:`KeyError` is raised. :param delivery_tag: The delivery tag associated with the message to be returned. :type delivery_tag: uuid.UUID :return: An un-ACKed message that is looked up by delivery_tag. :rtype: qpid.messaging.Message """ return self._not_yet_acked[delivery_tag] def ack(self, delivery_tag): """Acknowledge a message by delivery_tag. Called asynchronously once the message has been handled and can be forgotten by the broker. :param delivery_tag: the delivery tag associated with the message to be acknowledged. :type delivery_tag: uuid.UUID """ message = self._not_yet_acked.pop(delivery_tag) self.session.acknowledge(message=message) def reject(self, delivery_tag, requeue=False): """Reject a message by delivery_tag. Explicitly notify the broker that the channel associated with this QoS object is rejecting the message that was previously delivered. If requeue is False, then the message is not requeued for delivery to another consumer. If requeue is True, then the message is requeued for delivery to another consumer. :param delivery_tag: The delivery tag associated with the message to be rejected. :type delivery_tag: uuid.UUID :keyword requeue: If True, the broker will be notified to requeue the message. If False, the broker will be told to drop the message entirely. In both cases, the message will be removed from this object. :type requeue: bool """ message = self._not_yet_acked.pop(delivery_tag) QpidDisposition = qpid.messaging.Disposition if requeue: disposition = QpidDisposition(qpid.messaging.RELEASED) else: disposition = QpidDisposition(qpid.messaging.REJECTED) self.session.acknowledge(message=message, disposition=disposition) class Channel(base.StdChannel): """Supports broker configuration and messaging send and receive. :param connection: A Connection object that this Channel can reference. Currently only used to access callbacks. 
:type connection: kombu.transport.qpid.Connection :param transport: The Transport this Channel is associated with. :type transport: kombu.transport.qpid.Transport A channel object is designed to have method-parity with a Channel as defined in AMQP 0-10 and earlier, which allows for the following broker actions: - exchange declare and delete - queue declare and delete - queue bind and unbind operations - queue length and purge operations - sending/receiving/rejecting messages - structuring, encoding, and decoding messages - supports synchronous and asynchronous reads - reading state about the exchange, queues, and bindings Channels are designed to all share a single TCP connection with a broker, but provide a level of isolated communication with the broker while benefiting from a shared TCP connection. The Channel is given its :class:`~kombu.transport.qpid.Connection` object by the :class:`~kombu.transport.qpid.Transport` that instantiates the channel. This channel inherits from :class:`~kombu.transport.base.StdChannel`, which makes this a 'native' channel versus a 'virtual' channel which would inherit from :class:`kombu.transport.virtual`. Messages sent using this channel are assigned a delivery_tag. A delivery_tag is generated for each message as it is prepared for sending by :meth:`basic_publish`. The delivery_tag is unique per channel instance. The delivery_tag has no meaningful context in other objects, and is only maintained in the memory of this object, and the underlying :class:`QoS` object that provides support. Each channel object instantiates exactly one :class:`QoS` object for prefetch limiting, and asynchronous ACKing. The :class:`QoS` object is lazily instantiated through a property method :meth:`qos`. The :class:`QoS` object is a supporting object that should not be accessed directly except by the channel itself. Synchronous reads on a queue are done using a call to :meth:`basic_get` which uses :meth:`_get` to perform the reading. These methods read immediately and do not accept any form of timeout. :meth:`basic_get` reads synchronously and ACKs messages before returning them. ACKing is done in all cases, because an application that reads messages using qpid.messaging but does not ACK them will experience a memory leak. The no_ack argument to :meth:`basic_get` does not affect ACKing functionality. Asynchronous reads on a queue are done by starting a consumer using :meth:`basic_consume`. Each call to :meth:`basic_consume` will cause a :class:`~qpid.messaging.endpoints.Receiver` to be created on the :class:`~qpid.messaging.endpoints.Session` started by the :class:`Transport`. The receiver will asynchronously read using qpid.messaging, and prefetch messages before the call to :meth:`Transport.basic_drain` occurs. The prefetch_count value of the :class:`QoS` object is the capacity value of the new receiver. The new receiver capacity must always be at least 1, otherwise none of the receivers will appear to be ready for reading, and will never be read from. Each call to :meth:`basic_consume` creates a consumer, which is given a consumer tag that is specified by the caller of :meth:`basic_consume`. Already started consumers can be cancelled by their consumer_tag using :meth:`basic_cancel`. Cancellation of a consumer causes the :class:`~qpid.messaging.endpoints.Receiver` object to be closed. Asynchronous message ACKing is supported through :meth:`basic_ack`, and is referenced by delivery_tag. The Channel object uses its :class:`QoS` object to perform the message ACKing. 
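An illustrative sketch of the two read styles described above
(``connection``, ``transport`` and ``handle_message`` are assumed to
exist already; all names are examples):

.. code-block:: python

    channel = Channel(connection, transport)

    # Synchronous read: fetches immediately and ACKs before returning.
    message = channel.basic_get('my_queue')

    # Asynchronous read: start a consumer, then drain via the transport.
    channel.basic_consume('my_queue', no_ack=False,
                          callback=handle_message, consumer_tag='tag-1')
    # Messages arrive through Transport.drain_events() and are ACKed
    # later with channel.basic_ack(message.delivery_tag).
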
""" #: A class reference that will be instantiated using the qos property. QoS = QoS #: A class reference that identifies # :class:`~kombu.transport.virtual.Message` as the message class type Message = Message #: Default body encoding. #: NOTE: ``transport_options['body_encoding']`` will override this value. body_encoding = 'base64' #: Binary <-> ASCII codecs. codecs = {'base64': Base64()} def __init__(self, connection, transport): self.connection = connection self.transport = transport qpid_connection = connection.get_qpid_connection() self._broker = qpidtoollibs.BrokerAgent(qpid_connection) self.closed = False self._tag_to_queue = {} self._receivers = {} self._qos = None def _get(self, queue): """Non-blocking, single-message read from a queue. An internal method to perform a non-blocking, single-message read from a queue by name. This method creates a :class:`~qpid.messaging.endpoints.Receiver` to read from the queue using the :class:`~qpid.messaging.endpoints.Session` saved on the associated :class:`~kombu.transport.qpid.Transport`. The receiver is closed before the method exits. If a message is available, a :class:`qpid.messaging.Message` object is returned. If no message is available, a :class:`qpid.messaging.exceptions.Empty` exception is raised. This is an internal method. External calls for get functionality should be done using :meth:`basic_get`. :param queue: The queue name to get the message from :type queue: str :return: The received message. :rtype: :class:`qpid.messaging.Message` :raises: :class:`qpid.messaging.exceptions.Empty` if no message is available. """ rx = self.transport.session.receiver(queue) try: message = rx.fetch(timeout=0) finally: rx.close() return message def _put(self, routing_key, message, exchange=None, **kwargs): """Synchronously send a single message onto a queue or exchange. An internal method which synchronously sends a single message onto a given queue or exchange. If exchange is not specified, the message is sent directly to a queue specified by routing_key. If no queue is found by the name of routing_key while exchange is not specified an exception is raised. If an exchange is specified, then the message is delivered onto the requested exchange using routing_key. Message sending is synchronous using sync=True because large messages in kombu funtests were not being fully sent before the receiver closed. This method creates a :class:`qpid.messaging.endpoints.Sender` to send the message to the queue using the :class:`qpid.messaging.endpoints.Session` created and referenced by the associated :class:`~kombu.transport.qpid.Transport`. The sender is closed before the method exits. External calls for put functionality should be done using :meth:`basic_publish`. :param routing_key: If exchange is None, treated as the queue name to send the message to. If exchange is not None, treated as the routing_key to use as the message is submitted onto the exchange. :type routing_key: str :param message: The message to be sent as prepared by :meth:`basic_publish`. :type message: dict :keyword exchange: keyword parameter of the exchange this message should be sent on. If no exchange is specified, the message is sent directly to a queue specified by routing_key. 
:type exchange: str """ if not exchange: address = '%s; {assert: always, node: {type: queue}}' % ( routing_key,) msg_subject = None else: address = '%s/%s; {assert: always, node: {type: topic}}' % ( exchange, routing_key) msg_subject = str(routing_key) sender = self.transport.session.sender(address) qpid_message = qpid.messaging.Message(content=message, subject=msg_subject) try: sender.send(qpid_message, sync=True) finally: sender.close() def _purge(self, queue): """Purge all undelivered messages from a queue specified by name. An internal method to purge all undelivered messages from a queue specified by name. If the queue does not exist a :class:`qpid.messaging.exceptions.NotFound` exception is raised. The queue message depth is first checked, and then the broker is asked to purge that number of messages. The integer number of messages requested to be purged is returned. The actual number of messages purged may be different than the requested number of messages to purge (see below). Sometimes delivered messages are asked to be purged, but are not. This case fails silently, which is the correct behavior for a message that has been delivered to a different consumer who has not ACKed it and still has an active session with the broker. Messages in that case are not safe for purging and will be retained by the broker. The client is unable to change this delivery behavior. This is an internal method. External calls for purge functionality should be done using :meth:`queue_purge`. :param queue: the name of the queue to be purged :type queue: str :return: The number of messages requested to be purged. :rtype: int :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue being purged cannot be found. """ queue_to_purge = self._broker.getQueue(queue) if queue_to_purge is None: error_text = "NOT_FOUND - no queue '{0}'".format(queue) raise NotFound(code=404, text=error_text) message_count = queue_to_purge.values['msgDepth'] if message_count > 0: queue_to_purge.purge(message_count) return message_count def _size(self, queue): """Get the number of messages in a queue specified by name. An internal method to return the number of messages in a queue specified by name. It returns an integer count of the number of messages currently in the queue. :param queue: The name of the queue to be inspected for the number of messages :type queue: str :return: the number of messages in the queue specified by name. :rtype: int """ queue_to_check = self._broker.getQueue(queue) message_depth = queue_to_check.values['msgDepth'] return message_depth def _delete(self, queue, *args, **kwargs): """Delete a queue and all messages on that queue. An internal method to delete a queue specified by name and all the messages on it. First, all messages are purged from a queue using a call to :meth:`_purge`. Second, the broker is asked to delete the queue. This is an internal method. External calls for queue delete functionality should be done using :meth:`queue_delete`. :param queue: The name of the queue to be deleted. :type queue: str """ self._purge(queue) self._broker.delQueue(queue) def _has_queue(self, queue, **kwargs): """Determine if the broker has a queue specified by name. :param queue: The name of the queue to check the existence of. :type queue: str :return: True if a queue exists on the broker, and False otherwise. 
:rtype: bool """ if self._broker.getQueue(queue): return True else: return False def queue_declare(self, queue, passive=False, durable=False, exclusive=False, auto_delete=True, nowait=False, arguments=None): """Create a new queue specified by name. If the queue already exists, no change is made to the queue, and the return value returns information about the existing queue. The queue name is required and specified as the first argument. If passive is True, the server will not create the queue. The client can use this to check whether a queue exists without modifying the server state. Default is False. If durable is True, the queue will be durable. Durable queues remain active when a server restarts. Non-durable queues ( transient queues) are purged if/when a server restarts. Note that durable queues do not necessarily hold persistent messages, although it does not make sense to send persistent messages to a transient queue. Default is False. If exclusive is True, the queue will be exclusive. Exclusive queues may only be consumed by the current connection. Setting the 'exclusive' flag always implies 'auto-delete'. Default is False. If auto_delete is True, the queue is deleted when all consumers have finished using it. The last consumer can be cancelled either explicitly or because its channel is closed. If there was no consumer ever on the queue, it won't be deleted. Default is True. The nowait parameter is unused. It was part of the 0-9-1 protocol, but this AMQP client implements 0-10 which removed the nowait option. The arguments parameter is a set of arguments for the declaration of the queue. Arguments are passed as a dict or None. This field is ignored if passive is True. Default is None. This method returns a :class:`~collections.namedtuple` with the name 'queue_declare_ok_t' and the queue name as 'queue', message count on the queue as 'message_count', and the number of active consumers as 'consumer_count'. The named tuple values are ordered as queue, message_count, and consumer_count respectively. Due to Celery's non-ACKing of events, a ring policy is set on any queue that starts with the string 'celeryev' or ends with the string 'pidbox'. These are celery event queues, and Celery does not ack them, causing the messages to build-up. Eventually Qpid stops serving messages unless the 'ring' policy is set, at which point the buffer backing the queue becomes circular. :param queue: The name of the queue to be created. :type queue: str :param passive: If True, the sever will not create the queue. :type passive: bool :param durable: If True, the queue will be durable. :type durable: bool :param exclusive: If True, the queue will be exclusive. :type exclusive: bool :param auto_delete: If True, the queue is deleted when all consumers have finished using it. :type auto_delete: bool :param nowait: This parameter is unused since the 0-10 specification does not include it. :type nowait: bool :param arguments: A set of arguments for the declaration of the queue. :type arguments: dict or None :return: A named tuple representing the declared queue as a named tuple. The tuple values are ordered as queue, message count, and the active consumer count. 
:rtype: :class:`~collections.namedtuple` """ options = {'passive': passive, 'durable': durable, 'exclusive': exclusive, 'auto-delete': auto_delete, 'arguments': arguments} if queue.startswith('celeryev') or queue.endswith('pidbox'): options['qpid.policy_type'] = 'ring' try: self._broker.addQueue(queue, options=options) except Exception as exc: if OBJECT_ALREADY_EXISTS_STRING not in str(exc): raise exc queue_to_check = self._broker.getQueue(queue) message_count = queue_to_check.values['msgDepth'] consumer_count = queue_to_check.values['consumerCount'] return amqp.protocol.queue_declare_ok_t(queue, message_count, consumer_count) def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): """Delete a queue by name. Delete a queue specified by name. Using the if_unused keyword argument, the delete can only occur if there are 0 consumers bound to it. Using the if_empty keyword argument, the delete can only occur if there are 0 messages in the queue. :param queue: The name of the queue to be deleted. :type queue: str :keyword if_unused: If True, delete only if the queue has 0 consumers. If False, delete a queue even with consumers bound to it. :type if_unused: bool :keyword if_empty: If True, only delete the queue if it is empty. If False, delete the queue if it is empty or not. :type if_empty: bool """ if self._has_queue(queue): if if_empty and self._size(queue): return queue_obj = self._broker.getQueue(queue) consumer_count = queue_obj.getAttributes()['consumerCount'] if if_unused and consumer_count > 0: return self._delete(queue) def exchange_declare(self, exchange='', type='direct', durable=False, **kwargs): """Create a new exchange. Create an exchange of a specific type, and optionally have the exchange be durable. If an exchange of the requested name already exists, no action is taken and no exceptions are raised. Durable exchanges will survive a broker restart, non-durable exchanges will not. Exchanges provide behaviors based on their type. The expected behaviors are those defined in the AMQP 0-10 and prior specifications including 'direct', 'topic', and 'fanout' functionality. :keyword type: The exchange type. Valid values include 'direct', 'topic', and 'fanout'. :type type: str :keyword exchange: The name of the exchange to be created. If no exchange is specified, then a blank string will be used as the name. :type exchange: str :keyword durable: True if the exchange should be durable, or False otherwise. :type durable: bool """ options = {'durable': durable} try: self._broker.addExchange(type, exchange, options) except Exception as exc: if OBJECT_ALREADY_EXISTS_STRING not in str(exc): raise exc def exchange_delete(self, exchange_name, **kwargs): """Delete an exchange specified by name. :param exchange_name: The name of the exchange to be deleted. :type exchange_name: str """ self._broker.delExchange(exchange_name) def queue_bind(self, queue, exchange, routing_key, **kwargs): """Bind a queue to an exchange with a bind key. Bind a queue specified by name, to an exchange specified by name, with a specific bind key. The queue and exchange must already exist on the broker for the bind to complete successfully. Queues may be bound to exchanges multiple times with different keys. :param queue: The name of the queue to be bound. :type queue: str :param exchange: The name of the exchange that the queue should be bound to. :type exchange: str :param routing_key: The bind key that the specified queue should bind to the specified exchange with. 
:type routing_key: str """ self._broker.bind(exchange, queue, routing_key) def queue_unbind(self, queue, exchange, routing_key, **kwargs): """Unbind a queue from an exchange with a given bind key. Unbind a queue specified by name, from an exchange specified by name, that is already bound with a bind key. The queue and exchange must already exist on the broker, and bound with the bind key for the operation to complete successfully. Queues may be bound to exchanges multiple times with different keys, thus the bind key is a required field to unbind in an explicit way. :param queue: The name of the queue to be unbound. :type queue: str :param exchange: The name of the exchange that the queue should be unbound from. :type exchange: str :param routing_key: The existing bind key between the specified queue and a specified exchange that should be unbound. :type routing_key: str """ self._broker.unbind(exchange, queue, routing_key) def queue_purge(self, queue, **kwargs): """Remove all undelivered messages from queue. Purge all undelivered messages from a queue specified by name. If the queue does not exist an exception is raised. The queue message depth is first checked, and then the broker is asked to purge that number of messages. The integer number of messages requested to be purged is returned. The actual number of messages purged may be different than the requested number of messages to purge. Sometimes delivered messages are asked to be purged, but are not. This case fails silently, which is the correct behavior for a message that has been delivered to a different consumer who has not ACKed it and still has an active session with the broker. Messages in that case are not safe for purging and will be retained by the broker. The client is unable to change this delivery behavior. Internally, this method relies on :meth:`_purge`. :param queue: The name of the queue which should have all messages removed. :type queue: str :return: The number of messages requested to be purged. :rtype: int :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue being purged cannot be found. """ return self._purge(queue) def basic_get(self, queue, no_ack=False, **kwargs): """Non-blocking single message get and ACK from a queue by name. Internally this method uses :meth:`_get` to fetch the message. If an :class:`~qpid.messaging.exceptions.Empty` exception is raised by :meth:`_get`, this method silences it and returns None. If :meth:`_get` does return a message, that message is ACKed. The no_ack parameter has no effect on ACKing behavior, and all messages are ACKed in all cases. This method never adds fetched Messages to the internal QoS object for asynchronous ACKing. This method converts the message's object type as it passes through. Fetching from the broker, :meth:`_get` returns a :class:`qpid.messaging.Message`, but this method takes the payload of the :class:`qpid.messaging.Message` and instantiates a :class:`~kombu.transport.virtual.Message` object with the payload based on the class setting of self.Message. :param queue: The queue name to fetch a message from. :type queue: str :keyword no_ack: The no_ack parameter has no effect on the ACK behavior of this method. Un-ACKed messages create a memory leak in qpid.messaging, and need to be ACKed in all cases. :type no_ack: bool :return: The received message. 
:rtype: :class:`~kombu.transport.virtual.Message` """ try: qpid_message = self._get(queue) raw_message = qpid_message.content message = self.Message(raw_message, channel=self) self.transport.session.acknowledge(message=qpid_message) return message except Empty: pass def basic_ack(self, delivery_tag, multiple=False): """Acknowledge a message by delivery_tag. Acknowledges a message referenced by delivery_tag. Messages can only be ACKed using :meth:`basic_ack` if they were acquired using :meth:`basic_consume`. This is the ACKing portion of the asynchronous read behavior. Internally, this method uses the :class:`QoS` object, which stores messages and is responsible for the ACKing. :param delivery_tag: The delivery tag associated with the message to be acknowledged. :type delivery_tag: uuid.UUID :param multiple: not implemented. If set to True an AssertionError is raised. :type multiple: bool """ assert multiple is False self.qos.ack(delivery_tag) def basic_reject(self, delivery_tag, requeue=False): """Reject a message by delivery_tag. Rejects a message that has been received by the Channel, but not yet acknowledged. Messages are referenced by their delivery_tag. If requeue is False, the rejected message will be dropped by the broker and not delivered to any other consumers. If requeue is True, then the rejected message will be requeued for delivery to another consumer, potentially to the same consumer who rejected the message previously. :param delivery_tag: The delivery tag associated with the message to be rejected. :type delivery_tag: uuid.UUID :keyword requeue: If False, the rejected message will be dropped by the broker and not delivered to any other consumers. If True, then the rejected message will be requeued for delivery to another consumer, potentially to the same consumer who rejected the message previously. :type requeue: bool """ self.qos.reject(delivery_tag, requeue=requeue) def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): """Start an asynchronous consumer that reads from a queue. This method starts a consumer of type :class:`~qpid.messaging.endpoints.Receiver` using the :class:`~qpid.messaging.endpoints.Session` created and referenced by the :class:`Transport` that reads messages from a queue specified by name until stopped by a call to :meth:`basic_cancel`. Messages are available later through a synchronous call to :meth:`Transport.drain_events`, which will drain from the consumer started by this method. :meth:`Transport.drain_events` is synchronous, but the receiving of messages over the network occurs asynchronously, so it should still perform well. :meth:`Transport.drain_events` calls the callback provided here with the Message of type self.Message. Each consumer is referenced by a consumer_tag, which is provided by the caller of this method. This method sets up the callback onto the self.connection object in a dict keyed by queue name. :meth:`~Transport.drain_events` is responsible for calling that callback upon message receipt. All messages that are received are added to the QoS object to be saved for asynchronous ACKing later after the message has been handled by the caller of :meth:`~Transport.drain_events`. Messages can be ACKed after being received through a call to :meth:`basic_ack`. If no_ack is True, the receiver of the message will not call :meth:`basic_ack` later; since the message will not be ACKed later, it is ACKed immediately. 
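For example (an illustrative callback only; ``channel`` is assumed to
be an existing instance of this class):

.. code-block:: python

    def handle_message(message):
        print(message.body)
        channel.basic_ack(message.delivery_tag)

    channel.basic_consume('my_queue', no_ack=False,
                          callback=handle_message,
                          consumer_tag='consumer-1')
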
:meth:`basic_consume` transforms the message object type prior to calling the callback. Initially the message comes in as a :class:`qpid.messaging.Message`. This method unpacks the payload of the :class:`qpid.messaging.Message` and creates a new object of type self.Message. This method wraps the user delivered callback in a runtime-built function which provides the type transformation from :class:`qpid.messaging.Message` to :class:`~kombu.transport.virtual.Message`, and adds the message to the associated :class:`QoS` object for asynchronous ACKing if necessary. :param queue: The name of the queue to consume messages from :type queue: str :param no_ack: If True, then messages will not be saved for ACKing later, but will be ACKed immediately. If False, then messages will be saved for ACKing later with a call to :meth:`basic_ack`. :type no_ack: bool :param callback: a callable that will be called when messages arrive on the queue. :type callback: a callable object :param consumer_tag: a tag to reference the created consumer by. This consumer_tag is needed to cancel the consumer. :type consumer_tag: an immutable object """ self._tag_to_queue[consumer_tag] = queue def _callback(qpid_message): raw_message = qpid_message.content message = self.Message(raw_message, channel=self) delivery_tag = message.delivery_tag self.qos.append(qpid_message, delivery_tag) if no_ack: # Celery will not ack this message later, so we should ack now self.basic_ack(delivery_tag) return callback(message) self.connection._callbacks[queue] = _callback new_receiver = self.transport.session.receiver(queue) new_receiver.capacity = self.qos.prefetch_count self._receivers[consumer_tag] = new_receiver def basic_cancel(self, consumer_tag): """Cancel consumer by consumer tag. Request the consumer stops reading messages from its queue. The consumer is a :class:`~qpid.messaging.endpoints.Receiver`, and it is closed using :meth:`~qpid.messaging.endpoints.Receiver.close`. This method also cleans up all lingering references of the consumer. :param consumer_tag: The tag which refers to the consumer to be cancelled. Originally specified when the consumer was created as a parameter to :meth:`basic_consume`. :type consumer_tag: an immutable object """ if consumer_tag in self._receivers: receiver = self._receivers.pop(consumer_tag) receiver.close() queue = self._tag_to_queue.pop(consumer_tag, None) self.connection._callbacks.pop(queue, None) def close(self): """Cancel all associated messages and close the Channel. This cancels all consumers by calling :meth:`basic_cancel` for each known consumer_tag. It also closes the self._broker sessions. Closing the sessions implicitly causes all outstanding, un-ACKed messages to be considered undelivered by the broker. """ if not self.closed: self.closed = True for consumer_tag in self._receivers.keys(): self.basic_cancel(consumer_tag) if self.connection is not None: self.connection.close_channel(self) self._broker.close() @property def qos(self): """:class:`QoS` manager for this channel. Lazily instantiates an object of type :class:`QoS` upon access to the self.qos attribute. :return: An already existing, or newly created QoS object :rtype: :class:`QoS` """ if self._qos is None: self._qos = self.QoS(self.transport.session) return self._qos def basic_qos(self, prefetch_count, *args): """Change :class:`QoS` settings for this Channel. Set the number of un-acknowledged messages this Channel can fetch and hold. 
The prefetch_count is also used as the capacity for any new :class:`~qpid.messaging.endpoints.Receiver` objects. Currently, this value is hard-coded to 1. :param prefetch_count: Not used. This method is hard-coded to 1. :type prefetch_count: int """ self.qos.prefetch_count = 1 def prepare_message(self, body, priority=None, content_type=None, content_encoding=None, headers=None, properties=None): """Prepare message data for sending. This method is typically called by :meth:`kombu.messaging.Producer._publish` as a preparation step in message publication. :param body: The body of the message :type body: str :keyword priority: A number between 0 and 9 that sets the priority of the message. :type priority: int :keyword content_type: The content_type the message body should be treated as. If this is unset, the :class:`qpid.messaging.endpoints.Sender` object tries to autodetect the content_type from the body. :type content_type: str :keyword content_encoding: The content_encoding the message body is encoded as. :type content_encoding: str :keyword headers: Additional Message headers that should be set. Passed in as a key-value pair. :type headers: dict :keyword properties: Message properties to be set on the message. :type properties: dict :return: Returns a dict object that encapsulates message attributes. See parameters for more details on attributes that can be set. :rtype: dict """ properties = properties or {} info = properties.setdefault('delivery_info', {}) info['priority'] = priority or 0 return {'body': body, 'content-encoding': content_encoding, 'content-type': content_type, 'headers': headers or {}, 'properties': properties} def basic_publish(self, message, exchange, routing_key, **kwargs): """Publish message onto an exchange using a routing key. Publish a message onto an exchange specified by name using a routing key specified by routing_key. Prepares the message in the following ways before sending: - encodes the body using :meth:`encode_body` - wraps the body as a buffer object, so that :class:`qpid.messaging.endpoints.Sender` uses a content type that can support arbitrarily large messages. - sets delivery_tag to a random uuid.UUID - sets the exchange and routing_key info as delivery_info Internally uses :meth:`_put` to send the message synchronously. This method is typically called by :meth:`kombu.messaging.Producer._publish` as the final step in message publication. :param message: A dict containing key value pairs with the message data. A valid message dict can be generated using the :meth:`prepare_message` method. :type message: dict :param exchange: The name of the exchange to submit this message onto. :type exchange: str :param routing_key: The routing key to be used as the message is submitted onto the exchange. :type routing_key: str """ message['body'], body_encoding = self.encode_body( message['body'], self.body_encoding, ) message['body'] = buffer(message['body']) props = message['properties'] props.update( body_encoding=body_encoding, delivery_tag=uuid.uuid4(), ) props['delivery_info'].update( exchange=exchange, routing_key=routing_key, ) self._put(routing_key, message, exchange, **kwargs) def encode_body(self, body, encoding=None): """Encode a body using an optionally specified encoding. The encoding can be specified by name, and is looked up in self.codecs. self.codecs uses strings as its keys which specify the name of the encoding, and then the value is an instantiated object that can provide encoding/decoding of that type through encode and decode methods.
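Illustrative round-trip example, assuming a 'base64' codec is registered in self.codecs (as it is for this transport)::

    encoded, encoding = channel.encode_body('hello', 'base64')
    assert channel.decode_body(encoded, encoding) == 'hello'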
:param body: The body to be encoded. :type body: str :keyword encoding: The encoding type to be used. Must be a supported codec listed in self.codecs. :type encoding: str :return: If encoding is specified, return a tuple with the first position being the encoded body, and the second position the encoding used. If encoding is not specified, the body is passed through unchanged. :rtype: tuple """ if encoding: return self.codecs.get(encoding).encode(body), encoding return body, encoding def decode_body(self, body, encoding=None): """Decode a body using an optionally specified encoding. The encoding can be specified by name, and is looked up in self.codecs. self.codecs uses strings as its keys which specify the name of the encoding, and then the value is an instantiated object that can provide encoding/decoding of that type through encode and decode methods. :param body: The body to be decoded. :type body: str :keyword encoding: The encoding type to be used. Must be a supported codec listed in self.codecs. :type encoding: str :return: If encoding is specified, the decoded body is returned. If encoding is not specified, the body is returned unchanged. :rtype: str """ if encoding: return self.codecs.get(encoding).decode(body) return body def typeof(self, exchange, default='direct'): """Get the exchange type. Look up and return the exchange type for an exchange specified by name. Exchange types are expected to be 'direct', 'topic', and 'fanout', which correspond with exchange functionality as specified in AMQP 0-10 and earlier. If the exchange cannot be found, the default exchange type is returned. :param exchange: The exchange to have its type looked up. :type exchange: str :keyword default: The type of exchange to assume if the exchange does not exist. :type default: str :return: The exchange type: either 'direct', 'topic', or 'fanout'. :rtype: str """ qpid_exchange = self._broker.getExchange(exchange) if qpid_exchange: qpid_exchange_attributes = qpid_exchange.getAttributes() return qpid_exchange_attributes['type'] else: return default class Connection(object): """Qpid Connection. Encapsulate a connection object for the :class:`~kombu.transport.qpid.Transport`. :param host: The host that connections should connect to. :param port: The port that connections should connect to. :param username: The username that connections should connect with. Optional. :param password: The password that connections should connect with. Optional but requires a username. :param transport: The transport type that connections should use. Either 'tcp', or 'ssl' are expected as values. :param timeout: The timeout used when a Connection connects to the broker. :param sasl_mechanisms: The sasl authentication mechanism type to use. Refer to the SASL documentation for an explanation of valid values. .. note:: qpid.messaging has an AuthenticationFailure exception type, but instead raises a ConnectionError with a message that indicates an authentication failure occurred in those situations. ConnectionError is listed as a recoverable error type, so kombu will attempt to retry if a ConnectionError is raised. Retrying the operation without adjusting the credentials is not correct, so the constructor specifically checks for a ConnectionError that indicates an Authentication Failure occurred. In those situations, the error type is mutated while preserving the original message, and re-raised so that kombu will not consider the exception recoverable.
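An illustrative consequence of this behavior (the connection options here are hypothetical)::

    try:
        conn = Connection(host='localhost', port=5672,
                          sasl_mechanisms='PLAIN',
                          username='guest', password='wrong')
    except AuthenticationFailure:
        pass  # not recoverable; fix the credentials instead of retrying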
A connection object is created by a :class:`~kombu.transport.qpid.Transport` during a call to :meth:`~kombu.transport.qpid.Transport.establish_connection`. The :class:`~kombu.transport.qpid.Transport` passes in connection options as keywords that should be used for any connections created. Each :class:`~kombu.transport.qpid.Transport` creates exactly one Connection. A Connection object maintains a reference to a :class:`~qpid.messaging.endpoints.Connection` which can be accessed through a bound getter method named :meth:`get_qpid_connection`. Each Channel uses the Connection for each :class:`~qpidtoollibs.BrokerAgent` it creates, and the Transport maintains a session for all senders and receivers. The Connection object is also responsible for maintaining the dictionary of references to callbacks that should be called when messages are received. These callbacks are saved in _callbacks, and keyed on the queue name associated with the received message. The _callbacks are set up in :meth:`Channel.basic_consume`, removed in :meth:`Channel.basic_cancel`, and called in :meth:`Transport.drain_events`. All keyword arguments are collected into the connection_options dict and passed directly through to :meth:`qpid.messaging.endpoints.Connection.establish`. """ # A class reference to the :class:`Channel` object Channel = Channel def __init__(self, **connection_options): self.connection_options = connection_options self.channels = [] self._callbacks = {} self._qpid_conn = None establish = qpid.messaging.Connection.establish # There are several inconsistent behaviors in the sasl libraries # used on different systems. Although qpid.messaging allows # multiple space separated sasl mechanisms, this implementation # only advertises one type to the server. These are either # ANONYMOUS, PLAIN, or an overridden value specified by the user. sasl_mech = connection_options['sasl_mechanisms'] try: msg = _('Attempting to connect to qpid with ' 'SASL mechanism %s') % sasl_mech logger.debug(msg) self._qpid_conn = establish(**self.connection_options) # connection was successful if we got this far msg = _('Connected to qpid with SASL ' 'mechanism %s') % sasl_mech logger.info(msg) except ConnectionError as conn_exc: # If the connection failed because of an authentication problem, # raise AuthenticationFailure so kombu does not treat the error # as recoverable and retry with the same credentials; otherwise # re-raise the original ConnectionError. coded_as_auth_failure = getattr(conn_exc, 'code', None) == 320 contains_auth_fail_text = \ 'Authentication failed' in conn_exc.text contains_mech_fail_text = \ 'sasl negotiation failed: no mechanism agreed' \ in conn_exc.text contains_mech_unavail_text = 'no mechanism available' \ in conn_exc.text if coded_as_auth_failure or \ contains_auth_fail_text or contains_mech_fail_text or \ contains_mech_unavail_text: msg = _('Unable to connect to qpid with SASL ' 'mechanism %s') % sasl_mech logger.error(msg) raise AuthenticationFailure(sys.exc_info()[1]) raise def get_qpid_connection(self): """Return the existing connection (singleton). :return: The existing qpid.messaging.Connection :rtype: :class:`qpid.messaging.endpoints.Connection` """ return self._qpid_conn def close(self): """Close the connection. Closing the connection will close all associated sessions, senders, and receivers used by the Connection. """ self._qpid_conn.close() def close_channel(self, channel): """Close a Channel. Close a channel specified by a reference to the :class:`~kombu.transport.qpid.Channel` object.
:param channel: Channel that should be closed. :type channel: :class:`~kombu.transport.qpid.Channel`. """ try: self.channels.remove(channel) except ValueError: pass finally: channel.connection = None class Transport(base.Transport): """Kombu native transport for a Qpid broker. Provide a native transport for Kombu that allows consumers and producers to read and write messages to/from a broker. This Transport is capable of supporting both synchronous and asynchronous reading. All writes are synchronous through the :class:`Channel` objects that support this Transport. Asynchronous reads are done using a call to :meth:`drain_events`, which synchronously reads messages that were fetched asynchronously, and then handles them through calls to the callback handlers maintained on the :class:`Connection` object. The Transport also provides methods to establish and close a connection to the broker. This Transport follows a factory-like pattern in which each Transport creates exactly one :class:`Connection`, consolidating all communication with the broker onto that single connection. The Transport can create :class:`Channel` objects to communicate with the broker using the :meth:`create_channel` method. The Transport identifies recoverable connection errors and recoverable channel errors according to the Kombu 3.0 interface. These exceptions are listed in tuples and stored in the Transport class attributes `recoverable_connection_errors` and `recoverable_channel_errors`, respectively. Any exception raised that is not a member of one of these tuples is considered non-recoverable. This allows Kombu's support for automatic retry of certain operations to function correctly. For backwards compatibility with the pre-Kombu 3.0 exception interface, the recoverable errors are also listed as `connection_errors` and `channel_errors`. """ # Reference to the class that should be used as the Connection object Connection = Connection # This Transport does not specify a polling interval. polling_interval = None # This Transport does support the Celery asynchronous event model. supports_ev = True # The driver type and name for identification purposes. driver_type = 'qpid' driver_name = 'qpid' # Exceptions that can be recovered from, but where the connection must be # closed and re-established first. recoverable_connection_errors = ( ConnectionError, select.error, ) # Exceptions that can be automatically recovered from without # re-establishing the connection. recoverable_channel_errors = ( NotFound, ) # Support the pre 3.0 Kombu exception labeling interface which treats # connection_errors and channel_errors both as recoverable via a # reconnect. connection_errors = recoverable_connection_errors channel_errors = recoverable_channel_errors def __init__(self, *args, **kwargs): self.verify_runtime_environment() super(Transport, self).__init__(*args, **kwargs) self.use_async_interface = False def verify_runtime_environment(self): """Verify that the runtime environment is acceptable. This method is called as part of __init__ and raises a RuntimeError in Python 3 or PyPy environments. This module is not compatible with Python 3 or PyPy. The RuntimeError identifies this to the user up front along with suggesting Python 2.6+ be used instead. This method also checks that the dependencies qpidtoollibs and qpid.messaging are installed. If either one is not installed, a RuntimeError is raised. :raises: RuntimeError if the runtime environment is not acceptable. """ if getattr(sys, 'pypy_version_info', None): raise RuntimeError( 'The Qpid transport for Kombu does not '
'support PyPy. Try using Python 2.6+', ) if PY3: raise RuntimeError( 'The Qpid transport for Kombu does not ' 'support Python 3. Try using Python 2.6+', ) if dependency_is_none(qpidtoollibs): raise RuntimeError( 'The Python package "qpidtoollibs" is missing. Install it ' 'with your package manager. You can also try `pip install ' 'qpid-tools`.') if dependency_is_none(qpid): raise RuntimeError( 'The Python package "qpid.messaging" is missing. Install it ' 'with your package manager. You can also try `pip install ' 'qpid-python`.') def _qpid_message_ready_handler(self, session): if self.use_async_interface: os.write(self._w, '0') def _qpid_async_exception_notify_handler(self, obj_with_exception, exc): if self.use_async_interface: os.write(self._w, 'e') def on_readable(self, connection, loop): """Handle any messages associated with this Transport. This method clears a single message from the externally monitored file descriptor by issuing a read call to the self.r file descriptor which removes a single '0' character that was placed into the pipe by the Qpid session message callback handler. Once a '0' is read, all available events are drained through a call to :meth:`drain_events`. The file descriptor self.r is modified to be non-blocking, ensuring that an accidental call to this method when no more messages are available will not cause indefinite blocking. Nothing is expected to be returned from :meth:`drain_events` because :meth:`drain_events` handles messages by calling callbacks that are maintained on the :class:`~kombu.transport.qpid.Connection` object. When :meth:`drain_events` returns, all associated messages have been handled. This method calls drain_events() which reads as many messages as are available for this Transport, and then returns. It blocks in the sense that reading and handling a large number of messages may take time, but it does not block waiting for a new message to arrive. When :meth:`drain_events` is called, a timeout is not specified, which causes this behavior. One interesting behavior of note is that when multiple messages are ready, this method removes a single '0' character from self.r, but :meth:`drain_events` may handle an arbitrary number of messages. In that case, extra '0' characters may be left on self.r to be read, where messages corresponding with those '0' characters have already been handled. The external epoll loop will incorrectly think additional data is ready for reading, and will call on_readable unnecessarily, once for each '0' to be read. Additional calls to :meth:`on_readable` produce no negative side effects, and will eventually clear out the characters from the self.r file descriptor. If new messages show up during this draining period, they will also be properly handled. :param connection: The connection associated with the readable events, which contains the callbacks that need to be called for the readable objects. :type connection: kombu.transport.qpid.Connection :param loop: The asynchronous loop object that contains epoll-like functionality. :type loop: kombu.async.Hub """ os.read(self.r, 1) try: self.drain_events(connection) except socket.timeout: pass def register_with_event_loop(self, connection, loop): """Register a file descriptor and callback with the loop. Register the callback self.on_readable to be called when an external epoll loop sees that the file descriptor registered is ready for reading. The file descriptor is created by this Transport, and is written to when a message is available.
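A minimal sketch of the signaling mechanism (names are illustrative; the real pipe and registration are managed internally by this method)::

    r, w = os.pipe()
    loop.add_reader(r, transport.on_readable, connection, loop)
    # the qpid session callback later signals: os.write(w, '0')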
Because supports_ev == True, Celery expects to call this method to give the Transport an opportunity to register a read file descriptor for external monitoring by Celery using an Event I/O notification mechanism such as epoll. A callback is also registered that is to be called once the external epoll loop is ready to handle the epoll event associated with messages that are ready to be handled for this Transport. The registration call is made exactly once per Transport after the Transport is instantiated. :param connection: A reference to the connection associated with this Transport. :type connection: kombu.transport.qpid.Connection :param loop: A reference to the external loop. :type loop: kombu.async.hub.Hub """ self.r, self._w = os.pipe() if fcntl is not None: fcntl.fcntl(self.r, fcntl.F_SETFL, os.O_NONBLOCK) self.use_async_interface = True loop.add_reader(self.r, self.on_readable, connection, loop) def establish_connection(self): """Establish a Connection object. Determines the correct options to use when creating any connections needed by this Transport, and creates a :class:`Connection` object, which saves those values for connections generated as they are needed. The options are a mixture of what is passed in through the creator of the Transport, and the defaults provided by :meth:`default_connection_params`. Options cover broker network settings, timeout behaviors, authentication, and identity verification settings. This method also creates and stores a :class:`~qpid.messaging.endpoints.Session` using the :class:`~qpid.messaging.endpoints.Connection` created by this method. The Session is stored on self. :return: The created :class:`Connection` object is returned. :rtype: :class:`Connection` """ conninfo = self.client for name, default_value in items(self.default_connection_params): if not getattr(conninfo, name, None): setattr(conninfo, name, default_value) if conninfo.ssl: conninfo.qpid_transport = 'ssl' conninfo.transport_options['ssl_keyfile'] = conninfo.ssl[ 'keyfile'] conninfo.transport_options['ssl_certfile'] = conninfo.ssl[ 'certfile'] conninfo.transport_options['ssl_trustfile'] = conninfo.ssl[ 'ca_certs'] if conninfo.ssl['cert_reqs'] == ssl.CERT_REQUIRED: conninfo.transport_options['ssl_skip_hostname_check'] = False else: conninfo.transport_options['ssl_skip_hostname_check'] = True else: conninfo.qpid_transport = 'tcp' credentials = {} if conninfo.login_method is None: if conninfo.userid is not None and conninfo.password is not None: sasl_mech = 'PLAIN' credentials['username'] = conninfo.userid credentials['password'] = conninfo.password elif conninfo.userid is None and conninfo.password is not None: raise Exception( 'Password configured but no username. SASL PLAIN ' 'requires a username when using a password.') elif conninfo.userid is not None and conninfo.password is None: raise Exception(
'Username configured but no password. SASL PLAIN ' 'requires a password when using a username.') else: sasl_mech = 'ANONYMOUS' else: sasl_mech = conninfo.login_method if conninfo.userid is not None: credentials['username'] = conninfo.userid opts = { 'host': conninfo.hostname, 'port': conninfo.port, 'sasl_mechanisms': sasl_mech, 'timeout': conninfo.connect_timeout, 'transport': conninfo.qpid_transport } opts.update(credentials) opts.update(conninfo.transport_options) conn = self.Connection(**opts) conn.client = self.client self.session = conn.get_qpid_connection().session() self.session.set_message_received_notify_handler( self._qpid_message_ready_handler ) conn.get_qpid_connection().set_async_exception_notify_handler( self._qpid_async_exception_notify_handler ) self.session.set_async_exception_notify_handler( self._qpid_async_exception_notify_handler ) return conn def close_connection(self, connection): """Close the :class:`Connection` object. :param connection: The Connection that should be closed. :type connection: :class:`kombu.transport.qpid.Connection` """ connection.close() def drain_events(self, connection, timeout=0, **kwargs): """Handle and call callbacks for all ready Transport messages. Drains all events that are ready from all :class:`~qpid.messaging.endpoints.Receiver` objects that are asynchronously fetching messages. For each drained message, the message is passed to the appropriate callback. Callbacks are organized by queue name. :param connection: The :class:`~kombu.transport.qpid.Connection` that contains the callbacks, indexed by queue name, which will be called by this method. :type connection: kombu.transport.qpid.Connection :keyword timeout: The timeout that limits how long this method will run for. The timeout could interrupt a blocking read that is waiting for a new message, or cause this method to return before all messages are drained. Defaults to 0. :type timeout: int """ start_time = monotonic() elapsed_time = -1 while elapsed_time < timeout: try: receiver = self.session.next_receiver(timeout=timeout) message = receiver.fetch() queue = receiver.source except QpidEmpty: raise socket.timeout() else: connection._callbacks[queue](message) elapsed_time = monotonic() - start_time raise socket.timeout() def create_channel(self, connection): """Create and return a :class:`~kombu.transport.qpid.Channel`. Creates a new channel, and appends the channel to the list of channels known by the Connection. Once the new channel is created, it is returned. :param connection: The connection that should support the new :class:`~kombu.transport.qpid.Channel`. :type connection: kombu.transport.qpid.Connection :return: The new Channel that is made. :rtype: :class:`kombu.transport.qpid.Channel`. """ channel = connection.Channel(connection, self) connection.channels.append(channel) return channel @property def default_connection_params(self): """Return a dict with default connection parameters. These connection parameters will be used whenever the creator of Transport does not specify a required parameter. :return: A dict containing the default parameters. :rtype: dict """ return { 'hostname': 'localhost', 'port': 5672, } def __del__(self): """Ensure file descriptors opened in :meth:`register_with_event_loop` are closed.""" if getattr(self, 'use_async_interface', False): for fd in (self.r, self._w): try: os.close(fd) except OSError: # ignored pass kombu-4.1.0/kombu/transport/pyro.py0000644000175000017500000000471413130603207017274 0ustar omeromer00000000000000"""Pyro transport. Requires the :mod:`Pyro4` library to be installed.
""" from __future__ import absolute_import, unicode_literals import sys from kombu.five import reraise from kombu.utils.objects import cached_property from . import virtual try: import Pyro4 as pyro from Pyro4.errors import NamingError except ImportError: # pragma: no cover pyro = NamingError = None # noqa DEFAULT_PORT = 9090 E_LOOKUP = """\ Unable to locate pyro nameserver {0.virtual_host} on host {0.hostname}\ """ class Channel(virtual.Channel): """Pyro Channel.""" def queues(self): return self.shared_queues.get_queue_names() def _new_queue(self, queue, **kwargs): if queue not in self.queues(): self.shared_queues.new_queue(queue) def _get(self, queue, timeout=None): queue = self._queue_for(queue) msg = self.shared_queues._get(queue) return msg def _queue_for(self, queue): if queue not in self.queues(): self.shared_queues.new_queue(queue) return queue def _put(self, queue, message, **kwargs): queue = self._queue_for(queue) self.shared_queues._put(queue, message) def _size(self, queue): return self.shared_queues._size(queue) def _delete(self, queue, *args, **kwargs): self.shared_queues._delete(queue) def _purge(self, queue): return self.shared_queues._purge(queue) def after_reply_message_received(self, queue): pass @cached_property def shared_queues(self): return self.connection.shared_queues class Transport(virtual.Transport): """Pyro Transport.""" Channel = Channel #: memory backend state is global. state = virtual.BrokerState() default_port = DEFAULT_PORT driver_type = driver_name = 'pyro' def _open(self): conninfo = self.client pyro.config.HMAC_KEY = conninfo.virtual_host try: nameserver = pyro.locateNS(host=conninfo.hostname, port=self.default_port) # name of registered pyro object uri = nameserver.lookup(conninfo.virtual_host) return pyro.Proxy(uri) except NamingError: reraise(NamingError, NamingError(E_LOOKUP.format(conninfo)), sys.exc_info()[2]) def driver_version(self): return pyro.__version__ @cached_property def shared_queues(self): return self._open() kombu-4.1.0/kombu/transport/SLMQ.py0000644000175000017500000001365613130603207017064 0ustar omeromer00000000000000"""SoftLayer Message Queue transport.""" from __future__ import absolute_import, unicode_literals import socket import string import os from kombu.five import Empty, text_t from kombu.utils.encoding import bytes_to_str, safe_str from kombu.utils.json import loads, dumps from kombu.utils.objects import cached_property from . import virtual try: from softlayer_messaging import get_client from softlayer_messaging.errors import ResponseError except ImportError: # pragma: no cover get_client = ResponseError = None # noqa # dots are replaced by dash, all other punctuation replaced by underscore. CHARS_REPLACE_TABLE = { ord(c): 0x5f for c in string.punctuation if c not in '_' } class Channel(virtual.Channel): """SLMQ Channel.""" default_visibility_timeout = 1800 # 30 minutes. 
domain_format = 'kombu%(vhost)s' _slmq = None _queue_cache = {} _noack_queues = set() def __init__(self, *args, **kwargs): if get_client is None: raise ImportError( 'SLMQ transport requires the softlayer_messaging library', ) super(Channel, self).__init__(*args, **kwargs) queues = self.slmq.queues() for queue in queues: self._queue_cache[queue] = queue def basic_consume(self, queue, no_ack, *args, **kwargs): if no_ack: self._noack_queues.add(queue) return super(Channel, self).basic_consume(queue, no_ack, *args, **kwargs) def basic_cancel(self, consumer_tag): if consumer_tag in self._consumers: queue = self._tag_to_queue[consumer_tag] self._noack_queues.discard(queue) return super(Channel, self).basic_cancel(consumer_tag) def entity_name(self, name, table=CHARS_REPLACE_TABLE): """Format AMQP queue name into a valid SLMQ queue name.""" return text_t(safe_str(name)).translate(table) def _new_queue(self, queue, **kwargs): """Ensure a queue exists in SLMQ.""" queue = self.entity_name(self.queue_name_prefix + queue) try: return self._queue_cache[queue] except KeyError: try: self.slmq.create_queue( queue, visibility_timeout=self.visibility_timeout) except ResponseError: pass q = self._queue_cache[queue] = self.slmq.queue(queue) return q def _delete(self, queue, *args, **kwargs): """Delete queue by name.""" queue_name = self.entity_name(queue) self._queue_cache.pop(queue_name, None) self.slmq.queue(queue_name).delete(force=True) super(Channel, self)._delete(queue_name) def _put(self, queue, message, **kwargs): """Put message onto queue.""" q = self._new_queue(queue) q.push(dumps(message)) def _get(self, queue): """Try to retrieve a single message off ``queue``.""" q = self._new_queue(queue) rs = q.pop(1) if rs['items']: m = rs['items'][0] payload = loads(bytes_to_str(m['body'])) if queue in self._noack_queues: q.message(m['id']).delete() else: payload['properties']['delivery_info'].update({ 'slmq_message_id': m['id'], 'slmq_queue_name': q.name}) return payload raise Empty() def basic_ack(self, delivery_tag): delivery_info = self.qos.get(delivery_tag).delivery_info try: queue = delivery_info['slmq_queue_name'] except KeyError: pass else: self.delete_message(queue, delivery_info['slmq_message_id']) super(Channel, self).basic_ack(delivery_tag) def _size(self, queue): """Return the number of messages in a queue.""" return self._new_queue(queue).detail()['message_count'] def _purge(self, queue): """Delete all current messages in a queue.""" q = self._new_queue(queue) n = 0 results = q.pop(10) while results['items']: for m in results['items']: self.delete_message(queue, m['id']) n += 1 results = q.pop(10) return n def delete_message(self, queue, message_id): q = self.slmq.queue(self.entity_name(queue)) return q.message(message_id).delete() @property def slmq(self): if self._slmq is None: conninfo = self.conninfo account = os.environ.get('SLMQ_ACCOUNT', conninfo.virtual_host) user = os.environ.get('SL_USERNAME', conninfo.userid) api_key = os.environ.get('SL_API_KEY', conninfo.password) host = os.environ.get('SLMQ_HOST', conninfo.hostname) port = os.environ.get('SLMQ_PORT', conninfo.port) # default to a secure (https) endpoint unless explicitly disabled secure = bool(os.environ.get( 'SLMQ_SECURE', self.transport_options.get('secure', True))) endpoint = '{0}://{1}{2}'.format( 'https' if secure else 'http', host, ':{0}'.format(port) if port else '', ) self._slmq = get_client(account, endpoint=endpoint) self._slmq.authenticate(user, api_key) return self._slmq @property def conninfo(self): return self.connection.client @property def transport_options(self):
return self.connection.client.transport_options @cached_property def visibility_timeout(self): return (self.transport_options.get('visibility_timeout') or self.default_visibility_timeout) @cached_property def queue_name_prefix(self): return self.transport_options.get('queue_name_prefix', '') class Transport(virtual.Transport): """SLMQ Transport.""" Channel = Channel polling_interval = 1 default_port = None connection_errors = ( virtual.Transport.connection_errors + ( ResponseError, socket.error ) ) kombu-4.1.0/kombu/common.py0000644000175000017500000003121613130603207015534 0ustar omeromer00000000000000"""Common Utilities.""" from __future__ import absolute_import, unicode_literals import os import socket import threading from collections import deque from contextlib import contextmanager from functools import partial from itertools import count from uuid import uuid5, uuid4, uuid3, NAMESPACE_OID from amqp import RecoverableConnectionError from .entity import Exchange, Queue from .five import bytes_if_py2, range from .log import get_logger from .serialization import registry as serializers from .utils.uuid import uuid try: from _thread import get_ident except ImportError: # pragma: no cover try: # noqa from thread import get_ident # noqa except ImportError: # pragma: no cover from dummy_thread import get_ident # noqa __all__ = ['Broadcast', 'maybe_declare', 'uuid', 'itermessages', 'send_reply', 'collect_replies', 'insured', 'drain_consumer', 'eventloop'] #: Prefetch count can't exceed an unsigned short. PREFETCH_COUNT_MAX = 0xFFFF logger = get_logger(__name__) _node_id = None def get_node_id(): global _node_id if _node_id is None: _node_id = uuid4().int return _node_id def generate_oid(node_id, process_id, thread_id, instance): ent = bytes_if_py2('%x-%x-%x-%x' % ( node_id, process_id, thread_id, id(instance))) try: ret = str(uuid3(NAMESPACE_OID, ent)) except ValueError: ret = str(uuid5(NAMESPACE_OID, ent)) return ret def oid_from(instance, threads=True): return generate_oid( get_node_id(), os.getpid(), get_ident() if threads else 0, instance, ) class Broadcast(Queue): """Broadcast queue. Convenience class used to define broadcast queues. Every queue instance will have a unique name, and both the queue and exchange are configured with auto deletion. Arguments: name (str): This is used as the name of the exchange. queue (str): By default a unique id is used for the queue name for every consumer. You can specify a custom queue name here. **kwargs (Any): See :class:`~kombu.Queue` for a list of additional keyword arguments supported.
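Example (illustrative; every instance gets a unique ``bcast.<uuid>`` queue name):
    >>> from kombu.common import Broadcast
    >>> q = Broadcast('events')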
""" attrs = Queue.attrs + (('queue', None),) def __init__(self, name=None, queue=None, auto_delete=True, exchange=None, alias=None, **kwargs): queue = queue or 'bcast.{0}'.format(uuid()) return super(Broadcast, self).__init__( alias=alias or name, queue=queue, name=queue, auto_delete=auto_delete, exchange=(exchange if exchange is not None else Exchange(name, type='fanout')), **kwargs ) def declaration_cached(entity, channel): return entity in channel.connection.client.declared_entities def maybe_declare(entity, channel=None, retry=False, **retry_policy): """Declare entity (cached).""" is_bound = entity.is_bound orig = entity if not is_bound: assert channel entity = entity.bind(channel) if channel is None: assert is_bound channel = entity.channel declared = ident = None if channel.connection and entity.can_cache_declaration: declared = channel.connection.client.declared_entities ident = hash(entity) if ident in declared: return False if retry: return _imaybe_declare(entity, declared, ident, channel, orig, **retry_policy) return _maybe_declare(entity, declared, ident, channel, orig) def _maybe_declare(entity, declared, ident, channel, orig=None): if not channel.connection: raise RecoverableConnectionError('channel disconnected') entity.declare(channel=channel) if declared is not None and ident: declared.add(ident) if orig is not None: orig.name = entity.name return True def _imaybe_declare(entity, declared, ident, channel, orig=None, **retry_policy): return entity.channel.connection.client.ensure( entity, _maybe_declare, **retry_policy)( entity, declared, ident, channel, orig) def drain_consumer(consumer, limit=1, timeout=None, callbacks=None): """Drain messages from consumer instance.""" acc = deque() def on_message(body, message): acc.append((body, message)) consumer.callbacks = [on_message] + (callbacks or []) with consumer: for _ in eventloop(consumer.channel.connection.client, limit=limit, timeout=timeout, ignore_timeouts=True): try: yield acc.popleft() except IndexError: pass def itermessages(conn, channel, queue, limit=1, timeout=None, callbacks=None, **kwargs): """Iterator over messages.""" return drain_consumer( conn.Consumer(queues=[queue], channel=channel, **kwargs), limit=limit, timeout=timeout, callbacks=callbacks, ) def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False): """Best practice generator wrapper around ``Connection.drain_events``. Able to drain events forever, with a limit, and optionally ignoring timeout errors (a timeout of 1 is often used in environments where the socket can get "stuck", and is a best practice for Kombu consumers). ``eventloop`` is a generator. Examples: >>> from kombu.common import eventloop >>> def run(conn): ... it = eventloop(conn, timeout=1, ignore_timeouts=True) ... next(it) # one event consumed, or timed out. ... ... for _ in eventloop(conn, timeout=1, ignore_timeouts=True): ... pass # loop forever. It also takes an optional limit parameter, and timeout errors are propagated by default:: for _ in eventloop(connection, limit=1, timeout=1): pass See Also: :func:`itermessages`, which is an event loop bound to one or more consumers, that yields any messages received. """ for i in limit and range(limit) or count(): try: yield conn.drain_events(timeout=timeout) except socket.timeout: if timeout and not ignore_timeouts: # pragma: no cover raise def send_reply(exchange, req, msg, producer=None, retry=False, retry_policy=None, **props): """Send reply for request. 
Arguments: exchange (kombu.Exchange, str): Reply exchange req (~kombu.Message): Original request, a message with a ``reply_to`` property. producer (kombu.Producer): Producer instance retry (bool): If true, retry sending according to the ``retry_policy`` argument. retry_policy (Dict): Retry settings. **props (Any): Extra properties. """ return producer.publish( msg, exchange=exchange, retry=retry, retry_policy=retry_policy, **dict({'routing_key': req.properties['reply_to'], 'correlation_id': req.properties.get('correlation_id'), 'serializer': serializers.type_to_name[req.content_type], 'content_encoding': req.content_encoding}, **props) ) def collect_replies(conn, channel, queue, *args, **kwargs): """Generator collecting replies from ``queue``.""" no_ack = kwargs.setdefault('no_ack', True) received = False try: for body, message in itermessages(conn, channel, queue, *args, **kwargs): if not no_ack: message.ack() received = True yield body finally: if received: channel.after_reply_message_received(queue.name) def _ensure_errback(exc, interval): logger.error( 'Connection error: %r. Retry in %ss\n', exc, interval, exc_info=True, ) @contextmanager def _ignore_errors(conn): try: yield except conn.connection_errors + conn.channel_errors: pass def ignore_errors(conn, fun=None, *args, **kwargs): """Ignore connection and channel errors. The first argument must be a connection object, or any other object with ``connection_errors`` and ``channel_errors`` attributes. Can be used as a function: .. code-block:: python def example(connection): ignore_errors(connection, consumer.channel.close) or as a context manager: .. code-block:: python def example(connection): with ignore_errors(connection): consumer.channel.close() Note: Connection and channel errors should be properly handled, and not ignored. Using this function is only acceptable in a cleanup phase, like when a connection is lost or at shutdown. """ if fun: with _ignore_errors(conn): return fun(*args, **kwargs) return _ignore_errors(conn) def revive_connection(connection, channel, on_revive=None): if on_revive: on_revive(channel) def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts): """Function wrapper to handle connection errors. Ensures function performing broker commands completes despite intermittent connection failures. """ errback = errback or _ensure_errback with pool.acquire(block=True) as conn: conn.ensure_connection(errback=errback) # we cache the channel for subsequent calls, this has to be # reset on revival. channel = conn.default_channel revive = partial(revive_connection, conn, on_revive=on_revive) insured = conn.autoretry(fun, channel, errback=errback, on_revive=revive, **opts) retval, _ = insured(*args, **dict(kwargs, connection=conn)) return retval class QoS(object): """Thread-safe increment/decrement of a channel's prefetch_count. Arguments: callback (Callable): Function used to set new prefetch count, e.g. ``consumer.qos`` or ``channel.basic_qos``. Will be called with a single ``prefetch_count`` keyword argument. initial_value (int): Initial prefetch count value. Example: >>> from kombu import Consumer, Connection >>> connection = Connection('amqp://') >>> consumer = Consumer(connection) >>> qos = QoS(consumer.qos, initial_value=2) >>> qos.update() # set initial >>> qos.value 2 >>> def in_some_thread(): ... qos.increment_eventually() >>> def in_some_other_thread(): ... qos.decrement_eventually() >>> while 1: ... if qos.prev != qos.value: ... qos.update() # prefetch changed so update.
It can be used with any function supporting a ``prefetch_count`` keyword argument:: >>> channel = connection.channel() >>> QoS(channel.basic_qos, 10) >>> def set_qos(prefetch_count): ... print('prefetch count now: %r' % (prefetch_count,)) >>> QoS(set_qos, 10) """ prev = None def __init__(self, callback, initial_value): self.callback = callback self._mutex = threading.RLock() self.value = initial_value or 0 def increment_eventually(self, n=1): """Increment the value, but do not update the channel's QoS. Note: The MainThread will be responsible for calling :meth:`update` when necessary. """ with self._mutex: if self.value: self.value = self.value + max(n, 0) return self.value def decrement_eventually(self, n=1): """Decrement the value, but do not update the channel's QoS. Note: The MainThread will be responsible for calling :meth:`update` when necessary. """ with self._mutex: if self.value: self.value -= n if self.value < 1: self.value = 1 return self.value def set(self, pcount): """Set channel prefetch_count setting.""" if pcount != self.prev: new_value = pcount if pcount > PREFETCH_COUNT_MAX: logger.warn('QoS: Disabled: prefetch_count exceeds %r', PREFETCH_COUNT_MAX) new_value = 0 logger.debug('basic.qos: prefetch_count->%s', new_value) self.callback(prefetch_count=new_value) self.prev = pcount return pcount def update(self): """Update prefetch count with current value.""" with self._mutex: return self.set(self.value) kombu-4.1.0/kombu/messaging.py0000644000175000017500000005622013130603207016223 0ustar omeromer00000000000000"""Sending and receiving messages.""" from __future__ import absolute_import, unicode_literals from itertools import count from .common import maybe_declare from .compression import compress from .connection import maybe_channel, is_connection from .entity import Exchange, Queue, maybe_delivery_mode from .exceptions import ContentDisallowed from .five import items, python_2_unicode_compatible, text_t, values from .serialization import dumps, prepare_accept_content from .utils.functional import ChannelPromise, maybe_list __all__ = ['Exchange', 'Queue', 'Producer', 'Consumer'] @python_2_unicode_compatible class Producer(object): """Message Producer. Arguments: channel (kombu.Connection, ChannelT): Connection or channel. exchange (Exchange, str): Optional default exchange. routing_key (str): Optional default routing key. serializer (str): Default serializer. Default is `"json"`. compression (str): Default compression method. Default is no compression. auto_declare (bool): Automatically declare the default exchange at instantiation. Default is :const:`True`. on_return (Callable): Callback to call for undeliverable messages, when the `mandatory` or `immediate` arguments to :meth:`publish` is used. This callback needs the following signature: `(exception, exchange, routing_key, message)`. Note that the producer needs to drain events to use this feature. """ #: Default exchange exchange = None #: Default routing key. routing_key = '' #: Default serializer to use. Default is JSON. serializer = None #: Default compression method. Disabled by default. compression = None #: By default, if a default exchange is set, #: that exchange will be declared when publishing a message. auto_declare = True #: Basic return callback. on_return = None #: Set if channel argument was a Connection instance (using #: default_channel).
__connection__ = None def __init__(self, channel, exchange=None, routing_key=None, serializer=None, auto_declare=None, compression=None, on_return=None): self._channel = channel self.exchange = exchange self.routing_key = routing_key or self.routing_key self.serializer = serializer or self.serializer self.compression = compression or self.compression self.on_return = on_return or self.on_return self._channel_promise = None if self.exchange is None: self.exchange = Exchange('') if auto_declare is not None: self.auto_declare = auto_declare if self._channel: self.revive(self._channel) def __repr__(self): return '<Producer: {0._channel}>'.format(self) def __reduce__(self): return self.__class__, self.__reduce_args__() def __reduce_args__(self): return (None, self.exchange, self.routing_key, self.serializer, self.auto_declare, self.compression) def declare(self): """Declare the exchange. Note: This happens automatically at instantiation when the :attr:`auto_declare` flag is enabled. """ if self.exchange.name: self.exchange.declare() def maybe_declare(self, entity, retry=False, **retry_policy): """Declare exchange if not already declared during this session.""" if entity: return maybe_declare(entity, self.channel, retry, **retry_policy) def _delivery_details(self, exchange, delivery_mode=None, maybe_delivery_mode=maybe_delivery_mode, Exchange=Exchange): if isinstance(exchange, Exchange): return exchange.name, maybe_delivery_mode( delivery_mode or exchange.delivery_mode, ) # exchange is string, so inherit the delivery # mode of our default exchange. return exchange, maybe_delivery_mode( delivery_mode or self.exchange.delivery_mode, ) def publish(self, body, routing_key=None, delivery_mode=None, mandatory=False, immediate=False, priority=0, content_type=None, content_encoding=None, serializer=None, headers=None, compression=None, exchange=None, retry=False, retry_policy=None, declare=None, expiration=None, **properties): """Publish message to the specified exchange. Arguments: body (Any): Message body. routing_key (str): Message routing key. delivery_mode (enum): See :attr:`delivery_mode`. mandatory (bool): Currently not supported. immediate (bool): Currently not supported. priority (int): Message priority. A number between 0 and 9. content_type (str): Content type. Default is auto-detect. content_encoding (str): Content encoding. Default is auto-detect. serializer (str): Serializer to use. Default is auto-detect. compression (str): Compression method to use. Default is none. headers (Dict): Mapping of arbitrary headers to pass along with the message body. exchange (Exchange, str): Override the exchange. Note that this exchange must have been declared. declare (Sequence[EntityT]): Optional list of required entities that must have been declared before publishing the message. The entities will be declared using :func:`~kombu.common.maybe_declare`. retry (bool): Retry publishing, or declaring entities if the connection is lost. retry_policy (Dict): Retry configuration, this is the keywords supported by :meth:`~kombu.Connection.ensure`. expiration (float): A TTL in seconds can be specified per message. Default is no expiration. **properties (Any): Additional message properties, see AMQP spec.
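Example (illustrative)::

    producer.publish({'hello': 'world'}, routing_key='hipri',
                     serializer='json', compression='zlib',
                     retry=True)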
""" _publish = self._publish declare = [] if declare is None else declare headers = {} if headers is None else headers retry_policy = {} if retry_policy is None else retry_policy routing_key = self.routing_key if routing_key is None else routing_key compression = self.compression if compression is None else compression exchange_name, properties['delivery_mode'] = self._delivery_details( exchange or self.exchange, delivery_mode, ) if expiration is not None: properties['expiration'] = str(int(expiration * 1000)) body, content_type, content_encoding = self._prepare( body, serializer, content_type, content_encoding, compression, headers) if self.auto_declare and self.exchange.name: if self.exchange not in declare: # XXX declare should be a Set. declare.append(self.exchange) if retry: _publish = self.connection.ensure(self, _publish, **retry_policy) return _publish( body, priority, content_type, content_encoding, headers, properties, routing_key, mandatory, immediate, exchange_name, declare, ) def _publish(self, body, priority, content_type, content_encoding, headers, properties, routing_key, mandatory, immediate, exchange, declare): channel = self.channel message = channel.prepare_message( body, priority, content_type, content_encoding, headers, properties, ) if declare: maybe_declare = self.maybe_declare [maybe_declare(entity) for entity in declare] # handle autogenerated queue names for reply_to reply_to = properties.get('reply_to') if isinstance(reply_to, Queue): properties['reply_to'] = reply_to.name return channel.basic_publish( message, exchange=exchange, routing_key=routing_key, mandatory=mandatory, immediate=immediate, ) def _get_channel(self): channel = self._channel if isinstance(channel, ChannelPromise): channel = self._channel = channel() self.exchange.revive(channel) if self.on_return: channel.events['basic_return'].add(self.on_return) return channel def _set_channel(self, channel): self._channel = channel channel = property(_get_channel, _set_channel) def revive(self, channel): """Revive the producer after connection loss.""" if is_connection(channel): connection = channel self.__connection__ = connection channel = ChannelPromise(lambda: connection.default_channel) if isinstance(channel, ChannelPromise): self._channel = channel self.exchange = self.exchange(channel) else: # Channel already concrete self._channel = channel if self.on_return: self._channel.events['basic_return'].add(self.on_return) self.exchange = self.exchange(channel) def __enter__(self): return self def __exit__(self, *exc_info): self.release() def release(self): pass close = release def _prepare(self, body, serializer=None, content_type=None, content_encoding=None, compression=None, headers=None): # No content_type? Then we're serializing the data internally. if not content_type: serializer = serializer or self.serializer (content_type, content_encoding, body) = dumps(body, serializer=serializer) else: # If the programmer doesn't want us to serialize, # make sure content_encoding is set. if isinstance(body, text_t): if not content_encoding: content_encoding = 'utf-8' body = body.encode(content_encoding) # If they passed in a string, we can't know anything # about it. So assume it's binary data. 
elif not content_encoding: content_encoding = 'binary' if compression: body, headers['compression'] = compress(body, compression) return body, content_type, content_encoding @property def connection(self): try: return self.__connection__ or self.channel.connection.client except AttributeError: pass @python_2_unicode_compatible class Consumer(object): """Message consumer. Arguments: channel (kombu.Connection, ChannelT): see :attr:`channel`. queues (Sequence[kombu.Queue]): see :attr:`queues`. no_ack (bool): see :attr:`no_ack`. auto_declare (bool): see :attr:`auto_declare` callbacks (Sequence[Callable]): see :attr:`callbacks`. on_message (Callable): See :attr:`on_message` on_decode_error (Callable): see :attr:`on_decode_error`. prefetch_count (int): see :attr:`prefetch_count`. """ ContentDisallowed = ContentDisallowed #: The connection/channel to use for this consumer. channel = None #: A single :class:`~kombu.Queue`, or a list of queues to #: consume from. queues = None #: Flag for automatic message acknowledgment. #: If enabled the messages are automatically acknowledged by the #: broker. This can increase performance but means that you #: have no control of when the message is removed. #: #: Disabled by default. no_ack = None #: By default all entities will be declared at instantiation, if you #: want to handle this manually you can set this to :const:`False`. auto_declare = True #: List of callbacks called in order when a message is received. #: #: The signature of the callbacks must take two arguments: #: `(body, message)`, which is the decoded message body and #: the :class:`~kombu.Message` instance. callbacks = None #: Optional function called whenever a message is received. #: #: When defined this function will be called instead of the #: :meth:`receive` method, and :attr:`callbacks` will be disabled. #: #: So this can be used as an alternative to :attr:`callbacks` when #: you don't want the body to be automatically decoded. #: Note that the message will still be decompressed if the message #: has the ``compression`` header set. #: #: The signature of the callback must take a single argument, #: which is the :class:`~kombu.Message` object. #: #: Also note that the ``message.body`` attribute, which is the raw #: contents of the message body, may in some cases be a read-only #: :class:`buffer` object. on_message = None #: Callback called when a message can't be decoded. #: #: The signature of the callback must take two arguments: `(message, #: exc)`, which is the message that can't be decoded and the exception #: that occurred while trying to decode it. on_decode_error = None #: List of accepted content-types. #: #: An exception will be raised if the consumer receives #: a message with an untrusted content type. #: By default all content-types are accepted, but not if #: :func:`kombu.disable_untrusted_serializers` was called, #: in which case only json is allowed. accept = None #: Initial prefetch count #: #: If set, the consumer will set the prefetch_count QoS value at startup. #: Can also be changed using :meth:`qos`. prefetch_count = None #: Mapping of queues we consume from. 
_queues = None _tags = count(1) # global def __init__(self, channel, queues=None, no_ack=None, auto_declare=None, callbacks=None, on_decode_error=None, on_message=None, accept=None, prefetch_count=None, tag_prefix=None): self.channel = channel self.queues = maybe_list(queues or []) self.no_ack = self.no_ack if no_ack is None else no_ack self.callbacks = (self.callbacks or [] if callbacks is None else callbacks) self.on_message = on_message self.tag_prefix = tag_prefix self._active_tags = {} if auto_declare is not None: self.auto_declare = auto_declare if on_decode_error is not None: self.on_decode_error = on_decode_error self.accept = prepare_accept_content(accept) self.prefetch_count = prefetch_count if self.channel: self.revive(self.channel) @property def queues(self): return list(self._queues.values()) @queues.setter def queues(self, queues): self._queues = {q.name: q for q in queues} def revive(self, channel): """Revive consumer after connection loss.""" self._active_tags.clear() channel = self.channel = maybe_channel(channel) # modify dict size while iterating over it is not allowed for qname, queue in list(items(self._queues)): # name may have changed after declare self._queues.pop(qname, None) queue = self._queues[queue.name] = queue(self.channel) queue.revive(channel) if self.auto_declare: self.declare() if self.prefetch_count is not None: self.qos(prefetch_count=self.prefetch_count) def declare(self): """Declare queues, exchanges and bindings. Note: This is done automatically at instantiation when :attr:`auto_declare` is set. """ for queue in values(self._queues): queue.declare() def register_callback(self, callback): """Register a new callback to be called when a message is received. Note: The signature of the callback needs to accept two arguments: `(body, message)`, which is the decoded message body and the :class:`~kombu.Message` instance. """ self.callbacks.append(callback) def __enter__(self): self.consume() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.channel and self.channel.connection: conn_errors = self.channel.connection.client.connection_errors if not isinstance(exc_val, conn_errors): try: self.cancel() except Exception: pass def add_queue(self, queue): """Add a queue to the list of queues to consume from. Note: This will not start consuming from the queue, for that you will have to call :meth:`consume` after. """ queue = queue(self.channel) if self.auto_declare: queue.declare() self._queues[queue.name] = queue return queue def consume(self, no_ack=None): """Start consuming messages. Can be called multiple times, but note that while it will consume from new queues added since the last call, it will not cancel consuming from removed queues ( use :meth:`cancel_by_queue`). Arguments: no_ack (bool): See :attr:`no_ack`. """ queues = list(values(self._queues)) if queues: no_ack = self.no_ack if no_ack is None else no_ack H, T = queues[:-1], queues[-1] for queue in H: self._basic_consume(queue, no_ack=no_ack, nowait=True) self._basic_consume(T, no_ack=no_ack, nowait=False) def cancel(self): """End all active queue consumers. Note: This does not affect already delivered messages, but it does mean the server will not send any more messages for this consumer. 
""" cancel = self.channel.basic_cancel for tag in values(self._active_tags): cancel(tag) self._active_tags.clear() close = cancel def cancel_by_queue(self, queue): """Cancel consumer by queue name.""" qname = queue.name if isinstance(queue, Queue) else queue try: tag = self._active_tags.pop(qname) except KeyError: pass else: self.channel.basic_cancel(tag) finally: self._queues.pop(qname, None) def consuming_from(self, queue): """Return :const:`True` if currently consuming from queue'.""" name = queue if isinstance(queue, Queue): name = queue.name return name in self._active_tags def purge(self): """Purge messages from all queues. Warning: This will *delete all ready messages*, there is no undo operation. """ return sum(queue.purge() for queue in values(self._queues)) def flow(self, active): """Enable/disable flow from peer. This is a simple flow-control mechanism that a peer can use to avoid overflowing its queues or otherwise finding itself receiving more messages than it can process. The peer that receives a request to stop sending content will finish sending the current content (if any), and then wait until flow is reactivated. """ self.channel.flow(active) def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): """Specify quality of service. The client can request that messages should be sent in advance so that when the client finishes processing a message, the following message is already held locally, rather than needing to be sent down the channel. Prefetching gives a performance improvement. The prefetch window is Ignored if the :attr:`no_ack` option is set. Arguments: prefetch_size (int): Specify the prefetch window in octets. The server will send a message in advance if it is equal to or smaller in size than the available prefetch size (and also falls within other prefetch limits). May be set to zero, meaning "no specific limit", although other prefetch limits may still apply. prefetch_count (int): Specify the prefetch window in terms of whole messages. apply_global (bool): Apply new settings globally on all channels. """ return self.channel.basic_qos(prefetch_size, prefetch_count, apply_global) def recover(self, requeue=False): """Redeliver unacknowledged messages. Asks the broker to redeliver all unacknowledged messages on the specified channel. Arguments: requeue (bool): By default the messages will be redelivered to the original recipient. With `requeue` set to true, the server will attempt to requeue the message, potentially then delivering it to an alternative subscriber. """ return self.channel.basic_recover(requeue=requeue) def receive(self, body, message): """Method called when a message is received. This dispatches to the registered :attr:`callbacks`. Arguments: body (Any): The decoded message body. message (~kombu.Message): The message instance. Raises: NotImplementedError: If no consumer callbacks have been registered. 
""" callbacks = self.callbacks if not callbacks: raise NotImplementedError('Consumer does not have any callbacks') [callback(body, message) for callback in callbacks] def _basic_consume(self, queue, consumer_tag=None, no_ack=no_ack, nowait=True): tag = self._active_tags.get(queue.name) if tag is None: tag = self._add_tag(queue, consumer_tag) queue.consume(tag, self._receive_callback, no_ack=no_ack, nowait=nowait) return tag def _add_tag(self, queue, consumer_tag=None): tag = consumer_tag or '{0}{1}'.format( self.tag_prefix, next(self._tags)) self._active_tags[queue.name] = tag return tag def _receive_callback(self, message): accept = self.accept on_m, channel, decoded = self.on_message, self.channel, None try: m2p = getattr(channel, 'message_to_python', None) if m2p: message = m2p(message) if accept is not None: message.accept = accept if message.errors: return message._reraise_error(self.on_decode_error) decoded = None if on_m else message.decode() except Exception as exc: if not self.on_decode_error: raise self.on_decode_error(message, exc) else: return on_m(message) if on_m else self.receive(decoded, message) def __repr__(self): return '<{name}: {0.queues}>'.format(self, name=type(self).__name__) @property def connection(self): try: return self.channel.connection.client except AttributeError: pass kombu-4.1.0/kombu/mixins.py0000644000175000017500000002274613130603207015563 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Mixins.""" from __future__ import absolute_import, unicode_literals import socket from contextlib import contextmanager from functools import partial from itertools import count from time import sleep from .common import ignore_errors from .five import range from .messaging import Consumer, Producer from .log import get_logger from .utils.compat import nested from .utils.encoding import safe_repr from .utils.limits import TokenBucket from .utils.objects import cached_property __all__ = ['ConsumerMixin'] logger = get_logger(__name__) debug, info, warn, error = logger.debug, logger.info, logger.warn, logger.error W_CONN_LOST = """\ Connection to broker lost, trying to re-establish connection...\ """ W_CONN_ERROR = """\ Broker connection error, trying again in %s seconds: %r.\ """ class ConsumerMixin(object): """Convenience mixin for implementing consumer programs. It can be used outside of threads, with threads, or greenthreads (eventlet/gevent) too. The basic class would need a :attr:`connection` attribute which must be a :class:`~kombu.Connection` instance, and define a :meth:`get_consumers` method that returns a list of :class:`kombu.Consumer` instances to use. Supporting multiple consumers is important so that multiple channels can be used for different QoS requirements. Example: .. code-block:: python class Worker(ConsumerMixin): task_queue = Queue('tasks', Exchange('tasks'), 'tasks') def __init__(self, connection): self.connection = None def get_consumers(self, Consumer, channel): return [Consumer(queues=[self.task_queue], callbacks=[self.on_task])] def on_task(self, body, message): print('Got task: {0!r}'.format(body)) message.ack() Methods: * :meth:`extra_context` Optional extra context manager that will be entered after the connection and consumers have been set up. Takes arguments ``(connection, channel)``. * :meth:`on_connection_error` Handler called if the connection is lost/ or is unavailable. Takes arguments ``(exc, interval)``, where interval is the time in seconds when the connection will be retried. The default handler will log the exception. 
* :meth:`on_connection_revived` Handler called as soon as the connection is re-established after connection failure. Takes no arguments. * :meth:`on_consume_ready` Handler called when the consumer is ready to accept messages. Takes arguments ``(connection, channel, consumers)``. Also keyword arguments to ``consume`` are forwarded to this handler. * :meth:`on_consume_end` Handler called after the consumers are canceled. Takes arguments ``(connection, channel)``. * :meth:`on_iteration` Handler called for every iteration while draining events. Takes no arguments. * :meth:`on_decode_error` Handler called if a consumer was unable to decode the body of a message. Takes arguments ``(message, exc)`` where message is the original message object. The default handler will log the error and acknowledge the message, so if you override make sure to call super, or perform these steps yourself. """ #: maximum number of retries trying to re-establish the connection, #: if the connection is lost/unavailable. connect_max_retries = None #: When this is set to true the consumer should stop consuming #: and return, so that it can be joined if it is the implementation #: of a thread. should_stop = False def get_consumers(self, Consumer, channel): raise NotImplementedError('Subclass responsibility') def on_connection_revived(self): pass def on_consume_ready(self, connection, channel, consumers, **kwargs): pass def on_consume_end(self, connection, channel): pass def on_iteration(self): pass def on_decode_error(self, message, exc): error("Can't decode message body: %r (type:%r encoding:%r raw:%r')", exc, message.content_type, message.content_encoding, safe_repr(message.body)) message.ack() def on_connection_error(self, exc, interval): warn(W_CONN_ERROR, interval, exc, exc_info=1) @contextmanager def extra_context(self, connection, channel): yield def run(self, _tokens=1, **kwargs): restart_limit = self.restart_limit errors = (self.connection.connection_errors + self.connection.channel_errors) while not self.should_stop: try: if restart_limit.can_consume(_tokens): # pragma: no cover for _ in self.consume(limit=None, **kwargs): pass else: sleep(restart_limit.expected_time(_tokens)) except errors: warn(W_CONN_LOST, exc_info=1) @contextmanager def consumer_context(self, **kwargs): with self.Consumer() as (connection, channel, consumers): with self.extra_context(connection, channel): self.on_consume_ready(connection, channel, consumers, **kwargs) yield connection, channel, consumers def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs): elapsed = 0 with self.consumer_context(**kwargs) as (conn, channel, consumers): for i in limit and range(limit) or count(): if self.should_stop: break self.on_iteration() try: conn.drain_events(timeout=safety_interval) except socket.timeout: conn.heartbeat_check() elapsed += safety_interval if timeout and elapsed >= timeout: raise except socket.error: if not self.should_stop: raise else: yield elapsed = 0 debug('consume exiting') def maybe_conn_error(self, fun): """Use :func:`kombu.common.ignore_errors` instead.""" return ignore_errors(self, fun) def create_connection(self): return self.connection.clone() @contextmanager def establish_connection(self): with self.create_connection() as conn: conn.ensure_connection(self.on_connection_error, self.connect_max_retries) yield conn @contextmanager def Consumer(self): with self.establish_connection() as conn: self.on_connection_revived() info('Connected to %s', conn.as_uri()) channel = conn.default_channel cls = 
partial(Consumer, channel, on_decode_error=self.on_decode_error) with self._consume_from(*self.get_consumers(cls, channel)) as c: yield conn, channel, c debug('Consumers canceled') self.on_consume_end(conn, channel) debug('Connection closed') def _consume_from(self, *consumers): return nested(*consumers) @cached_property def restart_limit(self): return TokenBucket(1) @cached_property def connection_errors(self): return self.connection.connection_errors @cached_property def channel_errors(self): return self.connection.channel_errors class ConsumerProducerMixin(ConsumerMixin): """Consumer and Producer mixin. A version of ConsumerMixin that maintains a separate connection used for publishing messages. Example: .. code-block:: python class Worker(ConsumerProducerMixin): def __init__(self, connection): self.connection = connection def get_consumers(self, Consumer, channel): return [Consumer(queues=Queue('foo'), on_message=self.handle_message, accept='application/json', prefetch_count=10)] def handle_message(self, message): self.producer.publish( {'message': 'hello to you'}, exchange='', routing_key=message.properties['reply_to'], correlation_id=message.properties['correlation_id'], retry=True, ) """ _producer_connection = None def on_consume_end(self, connection, channel): if self._producer_connection is not None: self._producer_connection.close() self._producer_connection = None @property def producer(self): return Producer(self.producer_connection) @property def producer_connection(self): if self._producer_connection is None: conn = self.connection.clone() conn.ensure_connection(self.on_connection_error, self.connect_max_retries) self._producer_connection = conn return self._producer_connection kombu-4.1.0/kombu/message.py0000644000175000017500000001736513130603207015701 0ustar omeromer00000000000000"""Message class.""" from __future__ import absolute_import, unicode_literals import sys from .compression import decompress from .exceptions import MessageStateError from .five import python_2_unicode_compatible, reraise, text_t from .serialization import loads from .utils.functional import dictfilter __all__ = ['Message'] ACK_STATES = {'ACK', 'REJECTED', 'REQUEUED'} IS_PYPY = hasattr(sys, 'pypy_version_info') @python_2_unicode_compatible class Message(object): """Base class for received messages. Keyword Arguments: channel (ChannelT): If message was received, this should be the channel that the message was received on. body (str): Message body. delivery_mode (bool): Set custom delivery mode. Defaults to :attr:`delivery_mode`. priority (int): Message priority, 0 to broker configured max priority, where higher is better. content_type (str): The message's content_type. If content_type is set, no serialization occurs as it is assumed this is either a binary object, or you've done your own serialization. Leave blank if using built-in serialization as our library properly sets content_type. content_encoding (str): The character set in which this object is encoded. Use "binary" if sending in raw binary objects. Leave blank if using built-in serialization as our library properly sets content_encoding. properties (Dict): Message properties. headers (Dict): Message headers.
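Example:
    A short sketch of how a received message is typically handled
    inside a consumer callback (``body`` and ``message`` are the two
    arguments kombu passes to registered callbacks):

    .. code-block:: python

        def callback(body, message):
            print(message.delivery_info, message.properties)
            assert message.decode() == body  # memoized deserialization
            message.ack()  # or message.reject() / message.requeue()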
""" MessageStateError = MessageStateError errors = None if not IS_PYPY: # pragma: no cover __slots__ = ( '_state', 'channel', 'delivery_tag', 'content_type', 'content_encoding', 'delivery_info', 'headers', 'properties', 'body', '_decoded_cache', 'accept', '__dict__', ) def __init__(self, body=None, delivery_tag=None, content_type=None, content_encoding=None, delivery_info={}, properties=None, headers=None, postencode=None, accept=None, channel=None, **kwargs): self.errors = [] if self.errors is None else self.errors self.channel = channel self.delivery_tag = delivery_tag self.content_type = content_type self.content_encoding = content_encoding self.delivery_info = delivery_info self.headers = headers or {} self.properties = properties or {} self._decoded_cache = None self._state = 'RECEIVED' self.accept = accept compression = self.headers.get('compression') if not self.errors and compression: try: body = decompress(body, compression) except Exception: self.errors.append(sys.exc_info()) if not self.errors and postencode and isinstance(body, text_t): try: body = body.encode(postencode) except Exception: self.errors.append(sys.exc_info()) self.body = body def _reraise_error(self, callback=None): try: reraise(*self.errors[0]) except Exception as exc: if not callback: raise callback(self, exc) def ack(self, multiple=False): """Acknowledge this message as being processed. This will remove the message from the queue. Raises: MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.channel.no_ack_consumers is not None: try: consumer_tag = self.delivery_info['consumer_tag'] except KeyError: pass else: if consumer_tag in self.channel.no_ack_consumers: return if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_ack(self.delivery_tag, multiple=multiple) self._state = 'ACK' def ack_log_error(self, logger, errors, multiple=False): try: self.ack(multiple=multiple) except errors as exc: logger.critical("Couldn't ack %r, reason:%r", self.delivery_tag, exc, exc_info=True) def reject_log_error(self, logger, errors, requeue=False): try: self.reject(requeue=requeue) except errors as exc: logger.critical("Couldn't reject %r, reason: %r", self.delivery_tag, exc, exc_info=True) def reject(self, requeue=False): """Reject this message. The message will be discarded by the server. Raises: MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_reject(self.delivery_tag, requeue=requeue) self._state = 'REJECTED' def requeue(self): """Reject this message and put it back on the queue. Warning: You must not use this method as a means of selecting messages to process. Raises: MessageStateError: If the message has already been acknowledged/requeued/rejected. """ if self.channel is None: raise self.MessageStateError( 'This message does not have a receiving channel') if self.acknowledged: raise self.MessageStateError( 'Message already acknowledged with state: {0._state}'.format( self)) self.channel.basic_reject(self.delivery_tag, requeue=True) self._state = 'REQUEUED' def decode(self): """Deserialize the message body. 
Returning the original python structure sent by the publisher. Note: The return value is memoized, use `_decode` to force re-evaluation. """ if not self._decoded_cache: self._decoded_cache = self._decode() return self._decoded_cache def _decode(self): return loads(self.body, self.content_type, self.content_encoding, accept=self.accept) @property def acknowledged(self): """Set to true if the message has been acknowledged.""" return self._state in ACK_STATES @property def payload(self): """The decoded message body.""" return self._decoded_cache if self._decoded_cache else self.decode() def __repr__(self): return '<{0} object at {1:#x} with details {2!r}>'.format( type(self).__name__, id(self), dictfilter( state=self._state, content_type=self.content_type, delivery_tag=self.delivery_tag, body_length=len(self.body) if self.body is not None else None, properties=dictfilter( correlation_id=self.properties.get('correlation_id'), type=self.properties.get('type'), ), delivery_info=dictfilter( exchange=self.delivery_info.get('exchange'), routing_key=self.delivery_info.get('routing_key'), ), ), ) kombu-4.1.0/kombu/serialization.py0000644000175000017500000003571513130603207017131 0ustar omeromer00000000000000"""Serialization utilities.""" from __future__ import absolute_import, unicode_literals import codecs import os import sys import pickle as pypickle try: import cPickle as cpickle except ImportError: # pragma: no cover cpickle = None # noqa from collections import namedtuple from contextlib import contextmanager from io import BytesIO from .exceptions import ( ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled ) from .five import reraise, text_t from .utils.compat import entrypoints from .utils.encoding import bytes_to_str, str_to_bytes, bytes_t __all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister'] SKIP_DECODE = frozenset(['binary', 'ascii-8bit']) TRUSTED_CONTENT = frozenset(['application/data', 'application/text']) if sys.platform.startswith('java'): # pragma: no cover def _decode(t, coding): return codecs.getdecoder(coding)(t)[0] else: _decode = codecs.decode pickle = cpickle or pypickle pickle_load = pickle.load #: Kombu requires Python 2.5 or later so we use protocol 2 by default. #: There's a new protocol (3) but this is only supported by Python 3. pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2)) codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder')) @contextmanager def _reraise_errors(wrapper, include=(Exception,), exclude=(SerializerNotInstalled,)): try: yield except exclude: raise except include as exc: reraise(wrapper, wrapper(exc), sys.exc_info()[2]) def pickle_loads(s, load=pickle_load): # used to support buffer objects return load(BytesIO(s)) def parenthesize_alias(first, second): return '%s (%s)' % (first, second) if first else second class SerializerRegistry(object): """The registry keeps track of serialization methods.""" def __init__(self): self._encoders = {} self._decoders = {} self._default_encode = None self._default_content_type = None self._default_content_encoding = None self._disabled_content_types = set() self.type_to_name = {} self.name_to_type = {} def register(self, name, encoder, decoder, content_type, content_encoding='utf-8'): """Register a new encoder/decoder. Arguments: name (str): A convenience name for the serialization method. encoder (callable): A method that will be passed a python data structure and should return a string representing the serialized data. 
If :const:`None`, then only a decoder will be registered. Encoding will not be possible. decoder (Callable): A method that will be passed a string representing serialized data and should return a python data structure. If :const:`None`, then only an encoder will be registered. Decoding will not be possible. content_type (str): The mime-type describing the serialized structure. content_encoding (str): The content encoding (character set) that the `decoder` method will be returning. Will usually be `utf-8`, `us-ascii`, or `binary`. """ if encoder: self._encoders[name] = codec( content_type, content_encoding, encoder, ) if decoder: self._decoders[content_type] = decoder self.type_to_name[content_type] = name self.name_to_type[name] = content_type def enable(self, name): if '/' not in name: name = self.name_to_type[name] self._disabled_content_types.discard(name) def disable(self, name): if '/' not in name: name = self.name_to_type[name] self._disabled_content_types.add(name) def unregister(self, name): """Unregister registered encoder/decoder. Arguments: name (str): Registered serialization method name. Raises: SerializerNotInstalled: If a serializer by that name cannot be found. """ try: content_type = self.name_to_type[name] self._decoders.pop(content_type, None) self._encoders.pop(name, None) self.type_to_name.pop(content_type, None) self.name_to_type.pop(name, None) except KeyError: raise SerializerNotInstalled( 'No encoder/decoder installed for {0}'.format(name)) def _set_default_serializer(self, name): """Set the default serialization method used by this library. Arguments: name (str): The name of the registered serialization method. For example, `json` (default), `pickle`, `yaml`, `msgpack`, or any custom methods registered using :meth:`register`. Raises: SerializerNotInstalled: If the serialization method requested is not available. """ try: (self._default_content_type, self._default_content_encoding, self._default_encode) = self._encoders[name] except KeyError: raise SerializerNotInstalled( 'No encoder installed for {0}'.format(name)) def dumps(self, data, serializer=None): """Encode data. Serialize a data structure into a string suitable for sending as an AMQP message body. Arguments: data (List, Dict, str): The message data to send. serializer (str): An optional string representing the serialization method you want the data marshalled into. (For example, `json`, `raw`, or `pickle`). If :const:`None` (default), then json will be used, unless `data` is a :class:`str` or :class:`unicode` object. In this latter case, no serialization occurs as it would be unnecessary. Note that if `serializer` is specified, then that serialization method will be used even if a :class:`str` or :class:`unicode` object is passed in. Returns: Tuple[str, str, str]: A three-item tuple containing the content type (e.g., `application/json`), content encoding, (e.g., `utf-8`) and a string containing the serialized data. Raises: SerializerNotInstalled: If the serialization method requested is not available. """ if serializer == 'raw': return raw_encode(data) if serializer and not self._encoders.get(serializer): raise SerializerNotInstalled( 'No encoder installed for {0}'.format(serializer)) # If a raw string was sent, assume binary encoding # (it's likely either ASCII or a raw binary file, and a character # set of 'binary' will encompass both, even if not ideal. 
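# A few illustrative input/output pairs for dumps() under the default
# registry (example values, not normative):
#   dumps(b'raw')    -> ('application/data', 'binary', b'raw')
#   dumps(u'text')   -> ('text/plain', 'utf-8', b'text')
#   dumps({'a': 1})  -> ('application/json', 'utf-8', '{"a": 1}')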
if not serializer and isinstance(data, bytes_t): # In Python 3+, this would be "bytes"; allow binary data to be # sent as a message without getting encoder errors return 'application/data', 'binary', data # For Unicode objects, force it into a string if not serializer and isinstance(data, text_t): with _reraise_errors(EncodeError, exclude=()): payload = data.encode('utf-8') return 'text/plain', 'utf-8', payload if serializer: content_type, content_encoding, encoder = \ self._encoders[serializer] else: encoder = self._default_encode content_type = self._default_content_type content_encoding = self._default_content_encoding with _reraise_errors(EncodeError): payload = encoder(data) return content_type, content_encoding, payload def loads(self, data, content_type, content_encoding, accept=None, force=False, _trusted_content=TRUSTED_CONTENT): """Decode serialized data. Deserialize a data stream as serialized using `dumps` based on `content_type`. Arguments: data (bytes, buffer, str): The message data to deserialize. content_type (str): The content-type of the data. (e.g., `application/json`). content_encoding (str): The content-encoding of the data. (e.g., `utf-8`, `binary`, or `us-ascii`). accept (Set): List of content-types to accept. Raises: ContentDisallowed: If the content-type is not accepted. Returns: Any: The unserialized data. """ content_type = (bytes_to_str(content_type) if content_type else 'application/data') if accept is not None: if content_type not in _trusted_content \ and content_type not in accept: raise self._for_untrusted_content(content_type, 'untrusted') else: if content_type in self._disabled_content_types and not force: raise self._for_untrusted_content(content_type, 'disabled') content_encoding = (content_encoding or 'utf-8').lower() if data: decode = self._decoders.get(content_type) if decode: with _reraise_errors(DecodeError): return decode(data) if content_encoding not in SKIP_DECODE and \ not isinstance(data, text_t): with _reraise_errors(DecodeError): return _decode(data, content_encoding) return data def _for_untrusted_content(self, ctype, why): return ContentDisallowed( 'Refusing to deserialize {0} content of type {1}'.format( why, parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype), ), ) #: Global registry of serializers/deserializers. registry = SerializerRegistry() dumps = registry.dumps loads = registry.loads register = registry.register unregister = registry.unregister def raw_encode(data): """Special case serializer.""" content_type = 'application/data' payload = data if isinstance(payload, text_t): content_encoding = 'utf-8' with _reraise_errors(EncodeError, exclude=()): payload = payload.encode(content_encoding) else: content_encoding = 'binary' return content_type, content_encoding, payload def register_json(): """Register an encoder/decoder for JSON serialization.""" from kombu.utils import json as _json registry.register('json', _json.dumps, _json.loads, content_type='application/json', content_encoding='utf-8') def register_yaml(): """Register an encoder/decoder for YAML serialization. It is slower than JSON, but allows for more data types to be serialized. Useful if you need to send data such as dates. """ try: import yaml registry.register('yaml', yaml.safe_dump, yaml.safe_load, content_type='application/x-yaml', content_encoding='utf-8') except ImportError: def not_available(*args, **kwargs): """Raise SerializerNotInstalled. Used in case a client receives a yaml message, but yaml isn't installed.
""" raise SerializerNotInstalled( 'No decoder installed for YAML. Install the PyYAML library') registry.register('yaml', None, not_available, 'application/x-yaml') if sys.version_info[0] == 3: # pragma: no cover def unpickle(s): return pickle_loads(str_to_bytes(s)) else: unpickle = pickle_loads # noqa def register_pickle(): """Register pickle serializer. The fastest serialization method, but restricts you to python clients. """ def pickle_dumps(obj, dumper=pickle.dumps): return dumper(obj, protocol=pickle_protocol) registry.register('pickle', pickle_dumps, unpickle, content_type='application/x-python-serialize', content_encoding='binary') def register_msgpack(): """Register msgpack serializer. See Also: https://msgpack.org/. """ pack = unpack = None try: import msgpack if msgpack.version >= (0, 4): from msgpack import packb, unpackb def pack(s): return packb(s, use_bin_type=True) def unpack(s): return unpackb(s, encoding='utf-8') else: def version_mismatch(*args, **kwargs): raise SerializerNotInstalled( 'msgpack requires msgpack-python >= 0.4.0') pack = unpack = version_mismatch except (ImportError, ValueError): def not_available(*args, **kwargs): raise SerializerNotInstalled( 'No decoder installed for msgpack. ' 'Please install the msgpack-python library') pack = unpack = not_available registry.register( 'msgpack', pack, unpack, content_type='application/x-msgpack', content_encoding='binary', ) # Register the base serialization methods. register_json() register_pickle() register_yaml() register_msgpack() # Default serializer is 'json' registry._set_default_serializer('json') _setupfuns = { 'json': register_json, 'pickle': register_pickle, 'yaml': register_yaml, 'msgpack': register_msgpack, 'application/json': register_json, 'application/x-yaml': register_yaml, 'application/x-python-serialize': register_pickle, 'application/x-msgpack': register_msgpack, } def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']): """Enable serializers that are considered to be unsafe. Note: Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, but you can also specify a list of serializers (by name or content type) to enable. """ for choice in choices: try: registry.enable(choice) except KeyError: pass def disable_insecure_serializers(allowed=['json']): """Disable untrusted serializers. Will disable all serializers except ``json`` or you can specify a list of deserializers to allow. Note: Producers will still be able to serialize data in these formats, but consumers will not accept incoming data using the untrusted content types. 
""" for name in registry._decoders: registry.disable(name) if allowed is not None: for name in allowed: registry.enable(name) # Insecure serializers are disabled by default since v3.0 disable_insecure_serializers() # Load entrypoints from installed extensions for ep, args in entrypoints('kombu.serializers'): # pragma: no cover register(ep.name, *args) def prepare_accept_content(l, name_to_type=registry.name_to_type): if l is not None: return {n if '/' in n else name_to_type[n] for n in l} return l kombu-4.1.0/kombu/log.py0000644000175000017500000001030313130603207015017 0ustar omeromer00000000000000"""Logging Utilities.""" from __future__ import absolute_import, unicode_literals import logging import numbers import os import sys from logging.handlers import WatchedFileHandler from .five import string_t from .utils.encoding import safe_repr, safe_str from .utils.functional import maybe_evaluate from .utils.objects import cached_property __all__ = ['LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging'] try: LOG_LEVELS = dict(logging._nameToLevel) LOG_LEVELS.update(logging._levelToName) except AttributeError: LOG_LEVELS = dict(logging._levelNames) LOG_LEVELS.setdefault('FATAL', logging.FATAL) LOG_LEVELS.setdefault(logging.FATAL, 'FATAL') DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS') def get_logger(logger): """Get logger by name.""" if isinstance(logger, string_t): logger = logging.getLogger(logger) if not logger.handlers: logger.addHandler(logging.NullHandler()) return logger def get_loglevel(level): """Get loglevel by name.""" if isinstance(level, string_t): return LOG_LEVELS[level] return level def naive_format_parts(fmt): parts = fmt.split('%') for i, e in enumerate(parts[1:]): yield None if not e or not parts[i - 1] else e[0] def safeify_format(fmt, args, filters={'s': safe_str, 'r': safe_repr}): for index, type in enumerate(naive_format_parts(fmt)): filt = filters.get(type) yield filt(args[index]) if filt else args[index] class LogMixin(object): """Mixin that adds severity methods to any class.""" def debug(self, *args, **kwargs): return self.log(logging.DEBUG, *args, **kwargs) def info(self, *args, **kwargs): return self.log(logging.INFO, *args, **kwargs) def warn(self, *args, **kwargs): return self.log(logging.WARN, *args, **kwargs) def error(self, *args, **kwargs): return self._error(logging.ERROR, *args, **kwargs) def critical(self, *args, **kwargs): return self._error(logging.CRITICAL, *args, **kwargs) def _error(self, severity, *args, **kwargs): kwargs.setdefault('exc_info', True) if DISABLE_TRACEBACKS: kwargs.pop('exc_info', None) return self.log(severity, *args, **kwargs) def annotate(self, text): return '%s - %s' % (self.logger_name, text) def log(self, severity, *args, **kwargs): if self.logger.isEnabledFor(severity): log = self.logger.log if len(args) > 1 and isinstance(args[0], string_t): expand = [maybe_evaluate(arg) for arg in args[1:]] return log(severity, self.annotate(args[0].replace('%r', '%s')), *list(safeify_format(args[0], expand)), **kwargs) else: return self.logger.log( severity, self.annotate(' '.join(map(safe_str, args))), **kwargs) def get_logger(self): return get_logger(self.logger_name) def is_enabled_for(self, level): return self.logger.isEnabledFor(self.get_loglevel(level)) def get_loglevel(self, level): if not isinstance(level, numbers.Integral): return LOG_LEVELS[level] return level @cached_property def logger(self): return self.get_logger() @property def logger_name(self): return self.__class__.__name__ class Log(LogMixin): def 
__init__(self, name, logger=None): self._logger_name = name self._logger = logger def get_logger(self): if self._logger: return self._logger return LogMixin.get_logger(self) @property def logger_name(self): return self._logger_name def setup_logging(loglevel=None, logfile=None): """Setup logging.""" logger = logging.getLogger() loglevel = get_loglevel(loglevel or 'ERROR') logfile = logfile if logfile else sys.__stderr__ if not logger.handlers: if hasattr(logfile, 'write'): handler = logging.StreamHandler(logfile) else: handler = WatchedFileHandler(logfile) logger.addHandler(handler) logger.setLevel(loglevel) return logger kombu-4.1.0/kombu/utils/0000755000175000017500000000000013134154263015036 5ustar omeromer00000000000000kombu-4.1.0/kombu/utils/eventio.py0000644000175000017500000002402213130603207017052 0ustar omeromer00000000000000"""Selector Utilities.""" from __future__ import absolute_import, unicode_literals import errno import math import select as __select__ import socket import sys from numbers import Integral from . import fileno from .compat import detect_environment __all__ = ['poll'] _selectf = __select__.select _selecterr = __select__.error xpoll = getattr(__select__, 'poll', None) epoll = getattr(__select__, 'epoll', None) kqueue = getattr(__select__, 'kqueue', None) kevent = getattr(__select__, 'kevent', None) KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1) KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2) KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4) KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32) KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384) KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768) KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1) KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2) KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3) KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4) KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5) KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6) KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7) KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1) KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1) KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2) KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4) KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8) KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16) KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32) KQ_NOTE_REVOKE = getattr(__select__, 'KQ_NOTE_REVOKE', 64) POLLIN = getattr(__select__, 'POLLIN', 1) POLLOUT = getattr(__select__, 'POLLOUT', 4) POLLERR = getattr(__select__, 'POLLERR', 8) POLLHUP = getattr(__select__, 'POLLHUP', 16) POLLNVAL = getattr(__select__, 'POLLNVAL', 32) READ = POLL_READ = 0x001 WRITE = POLL_WRITE = 0x004 ERR = POLL_ERR = 0x008 | 0x010 try: SELECT_BAD_FD = {errno.EBADF, errno.WSAENOTSOCK} except AttributeError: SELECT_BAD_FD = {errno.EBADF} class _epoll(object): def __init__(self): self._epoll = epoll() def register(self, fd, events): try: self._epoll.register(fd, events) except Exception as exc: if getattr(exc, 'errno', None) != errno.EEXIST: raise return fd def unregister(self, fd): try: self._epoll.unregister(fd) except (socket.error, ValueError, KeyError, TypeError): pass except (IOError, OSError) as exc: if getattr(exc, 'errno', None) not in (errno.ENOENT, errno.EPERM): raise def poll(self, timeout): try: return self._epoll.poll(timeout if timeout is not None else -1) except Exception as exc: if getattr(exc, 'errno', 
None) != errno.EINTR: raise def close(self): self._epoll.close() class _kqueue(object): w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND | KQ_NOTE_ATTRIB | KQ_NOTE_DELETE) def __init__(self): self._kqueue = kqueue() self._active = {} self.on_file_change = None self._kcontrol = self._kqueue.control def register(self, fd, events): self._control(fd, events, KQ_EV_ADD) self._active[fd] = events return fd def unregister(self, fd): events = self._active.pop(fd, None) if events: try: self._control(fd, events, KQ_EV_DELETE) except socket.error: pass def watch_file(self, fd): ev = kevent(fd, filter=KQ_FILTER_VNODE, flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR, fflags=self.w_fflags) self._kcontrol([ev], 0) def unwatch_file(self, fd): ev = kevent(fd, filter=KQ_FILTER_VNODE, flags=KQ_EV_DELETE, fflags=self.w_fflags) self._kcontrol([ev], 0) def _control(self, fd, events, flags): if not events: return kevents = [] if events & WRITE: kevents.append(kevent(fd, filter=KQ_FILTER_WRITE, flags=flags)) if not kevents or events & READ: kevents.append( kevent(fd, filter=KQ_FILTER_READ, flags=flags), ) control = self._kcontrol for e in kevents: try: control([e], 0) except ValueError: pass def poll(self, timeout): try: kevents = self._kcontrol(None, 1000, timeout) except Exception as exc: if getattr(exc, 'errno', None) == errno.EINTR: return raise events, file_changes = {}, [] for k in kevents: fd = k.ident if k.filter == KQ_FILTER_READ: events[fd] = events.get(fd, 0) | READ elif k.filter == KQ_FILTER_WRITE: if k.flags & KQ_EV_EOF: events[fd] = ERR else: events[fd] = events.get(fd, 0) | WRITE elif k.filter == KQ_EV_ERROR: events[fd] = events.get(fd, 0) | ERR elif k.filter == KQ_FILTER_VNODE: if k.fflags & KQ_NOTE_DELETE: self.unregister(fd) file_changes.append(k) if file_changes: self.on_file_change(file_changes) return list(events.items()) def close(self): self._kqueue.close() class _poll(object): def __init__(self): self._poller = xpoll() self._quick_poll = self._poller.poll self._quick_register = self._poller.register self._quick_unregister = self._poller.unregister def register(self, fd, events): fd = fileno(fd) poll_flags = 0 if events & ERR: poll_flags |= POLLERR if events & WRITE: poll_flags |= POLLOUT if events & READ: poll_flags |= POLLIN self._quick_register(fd, poll_flags) return fd def unregister(self, fd): try: fd = fileno(fd) except socket.error as exc: # we don't know the previous fd of this object # but it will be removed by the next poll iteration. 
if getattr(exc, 'errno', None) in SELECT_BAD_FD: return fd raise self._quick_unregister(fd) return fd def poll(self, timeout, round=math.ceil, POLLIN=POLLIN, POLLOUT=POLLOUT, POLLERR=POLLERR, READ=READ, WRITE=WRITE, ERR=ERR, Integral=Integral): timeout = 0 if timeout and timeout < 0 else round((timeout or 0) * 1e3) try: event_list = self._quick_poll(timeout) except (_selecterr, socket.error) as exc: if getattr(exc, 'errno', None) == errno.EINTR: return raise ready = [] for fd, event in event_list: events = 0 if event & POLLIN: events |= READ if event & POLLOUT: events |= WRITE if event & POLLERR or event & POLLNVAL or event & POLLHUP: events |= ERR assert events if not isinstance(fd, Integral): fd = fd.fileno() ready.append((fd, events)) return ready def close(self): self._poller = None class _select(object): def __init__(self): self._all = (self._rfd, self._wfd, self._efd) = set(), set(), set() def register(self, fd, events): fd = fileno(fd) if events & ERR: self._efd.add(fd) if events & WRITE: self._wfd.add(fd) if events & READ: self._rfd.add(fd) return fd def _remove_bad(self): for fd in self._rfd | self._wfd | self._efd: try: _selectf([fd], [], [], 0) except (_selecterr, socket.error) as exc: if getattr(exc, 'errno', None) in SELECT_BAD_FD: self.unregister(fd) def unregister(self, fd): try: fd = fileno(fd) except socket.error as exc: # we don't know the previous fd of this object # but it will be removed by the next poll iteration. if getattr(exc, 'errno', None) in SELECT_BAD_FD: return raise self._rfd.discard(fd) self._wfd.discard(fd) self._efd.discard(fd) def poll(self, timeout): try: read, write, error = _selectf( self._rfd, self._wfd, self._efd, timeout, ) except (_selecterr, socket.error) as exc: if getattr(exc, 'errno', None) == errno.EINTR: return elif getattr(exc, 'errno', None) in SELECT_BAD_FD: return self._remove_bad() raise events = {} for fd in read: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | READ for fd in write: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | WRITE for fd in error: if not isinstance(fd, Integral): fd = fd.fileno() events[fd] = events.get(fd, 0) | ERR return list(events.items()) def close(self): self._rfd.clear() self._wfd.clear() self._efd.clear() def _get_poller(): if detect_environment() != 'default': # greenlet return _select elif epoll: # Py2.6+ Linux return _epoll elif kqueue and 'netbsd' in sys.platform: return _kqueue elif xpoll: return _poll else: return _select def poll(*args, **kwargs): """Create new poller instance.""" return _get_poller()(*args, **kwargs) kombu-4.1.0/kombu/utils/encoding.py0000644000175000017500000000722213130603207017172 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Text encoding utilities. Utilities to encode text, and to safely emit text from running applications without crashing from the infamous :exc:`UnicodeDecodeError` exception. """ from __future__ import absolute_import, unicode_literals import sys import traceback from kombu.five import text_t is_py3k = sys.version_info >= (3, 0) #: safe_str takes encoding from this file by default. #: :func:`set_default_encoding_file` can be used to set the #: default output file.
default_encoding_file = None def set_default_encoding_file(file): """Set file used to get codec information.""" global default_encoding_file default_encoding_file = file def get_default_encoding_file(): """Get file used to get codec information.""" return default_encoding_file if sys.platform.startswith('java'): # pragma: no cover def default_encoding(file=None): """Get default encoding.""" return 'utf-8' else: def default_encoding(file=None): # noqa """Get default encoding.""" file = file or get_default_encoding_file() return getattr(file, 'encoding', None) or sys.getfilesystemencoding() if is_py3k: # pragma: no cover def str_to_bytes(s): """Convert str to bytes.""" if isinstance(s, str): return s.encode() return s def bytes_to_str(s): """Convert bytes to str.""" if isinstance(s, bytes): return s.decode() return s def from_utf8(s, *args, **kwargs): """Get str from utf-8 encoding.""" return s def ensure_bytes(s): """Ensure s is bytes, not str.""" if not isinstance(s, bytes): return str_to_bytes(s) return s def default_encode(obj): """Encode using default encoding.""" return obj str_t = str else: def str_to_bytes(s): # noqa """Convert str to bytes.""" if isinstance(s, unicode): return s.encode() return s def bytes_to_str(s): # noqa """Convert bytes to str.""" return s def from_utf8(s, *args, **kwargs): # noqa """Convert utf-8 to ASCII.""" return s.encode('utf-8', *args, **kwargs) def default_encode(obj, file=None): # noqa """Get default encoding.""" return unicode(obj, default_encoding(file)) str_t = unicode ensure_bytes = str_to_bytes try: bytes_t = bytes except NameError: # pragma: no cover bytes_t = str # noqa def safe_str(s, errors='replace'): """Safe form of str(), void of unicode errors.""" s = bytes_to_str(s) if not isinstance(s, (text_t, bytes)): return safe_repr(s, errors) return _safe_str(s, errors) if is_py3k: # pragma: no cover def _safe_str(s, errors='replace', file=None): if isinstance(s, str): return s try: return str(s) except Exception as exc: return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format( type(s), exc, '\n'.join(traceback.format_stack())) else: def _safe_str(s, errors='replace', file=None): # noqa encoding = default_encoding(file) try: if isinstance(s, unicode): return s.encode(encoding, errors) return unicode(s, encoding, errors) except Exception as exc: return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format( type(s), exc, '\n'.join(traceback.format_stack())) def safe_repr(o, errors='replace'): """Safe form of repr, void of Unicode errors.""" try: return repr(o) except Exception: return _safe_str(o, errors) kombu-4.1.0/kombu/utils/time.py0000644000175000017500000000046413130603207016343 0ustar omeromer00000000000000"""Time Utilities.""" from __future__ import absolute_import, unicode_literals __all__ = ['maybe_s_to_ms'] def maybe_s_to_ms(v): # type: (Optional[Union[int, float]]) -> int """Convert seconds to milliseconds, but return None for None.""" return int(float(v) * 1000.0) if v is not None else v kombu-4.1.0/kombu/utils/amq_manager.py0000644000175000017500000000134613130603207017655 0ustar omeromer00000000000000"""AMQP Management API utilities.""" from __future__ import absolute_import, unicode_literals def get_manager(client, hostname=None, port=None, userid=None, password=None): """Get pyrabbit manager.""" import pyrabbit opt = client.transport_options.get def get(name, val, default): return (val if val is not None else opt('manager_%s' % name) or getattr(client, name, None) or default) host = get('hostname', hostname, 'localhost') port = port if port is not None else opt('manager_port', 15672) userid = get('userid', userid,
'guest') password = get('password', password, 'guest') return pyrabbit.Client('%s:%s' % (host, port), userid, password) kombu-4.1.0/kombu/utils/objects.py0000644000175000017500000000373213130603207017037 0ustar omeromer00000000000000"""Object Utilities.""" from __future__ import absolute_import, unicode_literals class cached_property(object): """Cached property descriptor. Caches the return value of the get method on first call. Examples: .. code-block:: python @cached_property def connection(self): return Connection() @connection.setter # Prepares stored value def connection(self, value): if value is None: raise TypeError('Connection must be a connection') return value @connection.deleter def connection(self, value): # Additional action to do at del(self.attr) if value is not None: print('Connection {0!r} deleted'.format(value)) """ def __init__(self, fget=None, fset=None, fdel=None, doc=None): self.__get = fget self.__set = fset self.__del = fdel self.__doc__ = doc or fget.__doc__ self.__name__ = fget.__name__ self.__module__ = fget.__module__ def __get__(self, obj, type=None): if obj is None: return self try: return obj.__dict__[self.__name__] except KeyError: value = obj.__dict__[self.__name__] = self.__get(obj) return value def __set__(self, obj, value): if obj is None: return self if self.__set is not None: value = self.__set(obj, value) obj.__dict__[self.__name__] = value def __delete__(self, obj, _sentinel=object()): if obj is None: return self value = obj.__dict__.pop(self.__name__, _sentinel) if self.__del is not None and value is not _sentinel: self.__del(obj, value) def setter(self, fset): return self.__class__(self.__get, fset, self.__del) def deleter(self, fdel): return self.__class__(self.__get, self.__set, fdel) kombu-4.1.0/kombu/utils/__init__.py0000644000175000017500000000131513130603207017140 0ustar omeromer00000000000000"""DEPRECATED - Import from modules below.""" from __future__ import absolute_import, print_function, unicode_literals from .collections import EqualityDict from .compat import fileno, maybe_fileno, nested, register_after_fork from .div import emergency_dump_state from .functional import ( fxrange, fxrangemax, maybe_list, reprcall, reprkwargs, retry_over_time, ) from .imports import symbol_by_name from .objects import cached_property from .uuid import uuid __all__ = [ 'EqualityDict', 'uuid', 'maybe_list', 'fxrange', 'fxrangemax', 'retry_over_time', 'emergency_dump_state', 'cached_property', 'register_after_fork', 'reprkwargs', 'reprcall', 'symbol_by_name', 'nested', 'fileno', 'maybe_fileno', ] kombu-4.1.0/kombu/utils/compat.py0000644000175000017500000000672713130603207016700 0ustar omeromer00000000000000"""Python Compatibility Utilities.""" from __future__ import absolute_import, unicode_literals import numbers import sys from functools import wraps from contextlib import contextmanager from kombu.five import reraise try: from io import UnsupportedOperation FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation) except ImportError: # pragma: no cover # Py2 FILENO_ERRORS = (AttributeError, ValueError) # noqa try: from billiard.util import register_after_fork except ImportError: # pragma: no cover try: from multiprocessing.util import register_after_fork # noqa except ImportError: register_after_fork = None # noqa try: from typing import NamedTuple except ImportError: import collections def NamedTuple(name, fields): """Typed version of collections.namedtuple.""" return collections.namedtuple(name, [k for k, _ in fields]) _environment = None def coro(gen):
"""Decorator to mark generator as co-routine.""" @wraps(gen) def wind_up(*args, **kwargs): it = gen(*args, **kwargs) next(it) return it return wind_up def _detect_environment(): # ## -eventlet- if 'eventlet' in sys.modules: try: from eventlet.patcher import is_monkey_patched as is_eventlet import socket if is_eventlet(socket): return 'eventlet' except ImportError: pass # ## -gevent- if 'gevent' in sys.modules: try: from gevent import socket as _gsocket import socket if socket.socket is _gsocket.socket: return 'gevent' except ImportError: pass return 'default' def detect_environment(): """Detect the current environment: default, eventlet, or gevent.""" global _environment if _environment is None: _environment = _detect_environment() return _environment def entrypoints(namespace): """Return setuptools entrypoints for namespace.""" try: from pkg_resources import iter_entry_points except ImportError: return iter([]) return ((ep, ep.load()) for ep in iter_entry_points(namespace)) def fileno(f): """Get fileno from file-like object.""" if isinstance(f, numbers.Integral): return f return f.fileno() def maybe_fileno(f): """Get object fileno, or :const:`None` if not defined.""" try: return fileno(f) except FILENO_ERRORS: pass @contextmanager def nested(*managers): # pragma: no cover """Nest context managers.""" # flake8: noqa exits = [] vars = [] exc = (None, None, None) try: try: for mgr in managers: exit = mgr.__exit__ enter = mgr.__enter__ vars.append(enter()) exits.append(exit) yield vars except: exc = sys.exc_info() finally: while exits: exit = exits.pop() try: if exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() if exc != (None, None, None): # Don't rely on sys.exc_info() still containing # the right information. Another exception may # have been raised and caught by an exit method reraise(exc[0], exc[1], exc[2]) finally: del(exc) kombu-4.1.0/kombu/utils/url.py0000644000175000017500000000541513130603207016210 0ustar omeromer00000000000000"""URL Utilities.""" from __future__ import absolute_import, unicode_literals from collections import Mapping from functools import partial try: from urllib.parse import parse_qsl, quote, unquote, urlparse except ImportError: from urllib import quote, unquote # noqa from urlparse import urlparse, parse_qsl # noqa from kombu.five import bytes_if_py2, string_t from .compat import NamedTuple safequote = partial(quote, safe=bytes_if_py2('')) urlparts = NamedTuple('urlparts', [ ('scheme', str), ('hostname', str), ('port', int), ('username', str), ('password', str), ('path', str), ('query', Mapping), ]) def parse_url(url): # type: (str) -> Dict """Parse URL into mapping of components.""" scheme, host, port, user, password, path, query = _parse_url(url) return dict(transport=scheme, hostname=host, port=port, userid=user, password=password, virtual_host=path, **query) def url_to_parts(url): # type: (str) -> urlparts """Parse URL into :class:`urlparts` tuple of components.""" scheme = urlparse(url).scheme schemeless = url[len(scheme) + 3:] # parse with HTTP URL semantics parts = urlparse('http://' + schemeless) path = parts.path or '' path = path[1:] if path and path[0] == '/' else path return urlparts( scheme, unquote(parts.hostname or '') or None, parts.port, unquote(parts.username or '') or None, unquote(parts.password or '') or None, unquote(path or '') or None, dict(parse_qsl(parts.query)), ) _parse_url = url_to_parts # noqa def as_url(scheme, host=None, port=None, user=None, password=None, path=None, query=None, sanitize=False, mask='**'): # type: 
(str, str, int, str, str, str, str, bool, str) -> str """Generate URL from component parts.""" parts = ['{0}://'.format(scheme)] if user or password: if user: parts.append(safequote(user)) if password: if sanitize: parts.extend([':', mask] if mask else [':']) else: parts.extend([':', safequote(password)]) parts.append('@') parts.append(safequote(host) if host else '') if port: parts.extend([':', port]) parts.extend(['/', path]) return ''.join(str(part) for part in parts if part) def sanitize_url(url, mask='**'): # type: (str, str) -> str """Return copy of URL with password removed.""" return as_url(*_parse_url(url), sanitize=True, mask=mask) def maybe_sanitize_url(url, mask='**'): # type: (Any, str) -> Any """Sanitize url, or do nothing if url undefined.""" if isinstance(url, string_t) and '://' in url: return sanitize_url(url, mask) return url kombu-4.1.0/kombu/utils/text.py0000644000175000017500000000425113130603207016367 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Text Utilities.""" from __future__ import absolute_import, unicode_literals from difflib import SequenceMatcher from kombu import version_info_t from kombu.five import string_t def escape_regex(p, white=''): # type: (str, str) -> str """Escape string for use within a regular expression.""" # what's up with re.escape? that code must be neglected or something return ''.join(c if c.isalnum() or c in white else ('\\000' if c == '\000' else '\\' + c) for c in p) def fmatch_iter(needle, haystack, min_ratio=0.6): # type: (str, Sequence[str], float) -> Iterator[Tuple[float, str]] """Fuzzy match: iteratively. Yields: Tuple: of ratio and key. """ for key in haystack: ratio = SequenceMatcher(None, needle, key).ratio() if ratio >= min_ratio: yield ratio, key def fmatch_best(needle, haystack, min_ratio=0.6): # type: (str, Sequence[str], float) -> str """Fuzzy match - Find best match (scalar).""" try: return sorted( fmatch_iter(needle, haystack, min_ratio), reverse=True, )[0][1] except IndexError: pass def version_string_as_tuple(s): # type: (str) -> version_info_t """Convert version string to version info tuple.""" v = _unpack_version(*s.split('.')) # X.Y.3a1 -> (X, Y, 3, 'a1') if isinstance(v.micro, string_t): v = version_info_t(v.major, v.minor, *_splitmicro(*v[2:])) # X.Y.3a1-40 -> (X, Y, 3, 'a1', '40') if not v.serial and v.releaselevel and '-' in v.releaselevel: v = version_info_t(*list(v[0:3]) + v.releaselevel.split('-')) return v def _unpack_version(major, minor=0, micro=0, releaselevel='', serial=''): # type: (int, int, int, str, str) -> version_info_t return version_info_t(int(major), int(minor), micro, releaselevel, serial) def _splitmicro(micro, releaselevel='', serial=''): # type: (int, str, str) -> Tuple[int, str, str] for index, char in enumerate(micro): if not char.isdigit(): break else: return int(micro or 0), releaselevel, serial return int(micro[:index]), micro[index:], serial kombu-4.1.0/kombu/utils/collections.py0000644000175000017500000000171313130603207017721 0ustar omeromer00000000000000"""Custom maps, sequences, etc.""" from __future__ import absolute_import, unicode_literals class HashedSeq(list): """Hashed Sequence. Type used for hash() to make sure the hash is not generated multiple times.
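Example:
    A small sketch of the intended use (values are illustrative):

    .. code-block:: python

        key = HashedSeq('x', 1)       # hash(('x', 1)) computed once
        mapping = {key: 'value'}      # reuses the cached hashvalue
        assert mapping[HashedSeq('x', 1)] == 'value'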
""" __slots__ = 'hashvalue' def __init__(self, *seq): self[:] = seq self.hashvalue = hash(seq) def __hash__(self): return self.hashvalue def eqhash(o): """Call ``obj.__eqhash__``.""" try: return o.__eqhash__() except AttributeError: return hash(o) class EqualityDict(dict): """Dict using the eq operator for keying.""" def __getitem__(self, key): h = eqhash(key) if h not in self: return self.__missing__(key) return dict.__getitem__(self, h) def __setitem__(self, key, value): return dict.__setitem__(self, eqhash(key), value) def __delitem__(self, key): return dict.__delitem__(self, eqhash(key)) kombu-4.1.0/kombu/utils/div.py0000644000175000017500000000172313130603207016166 0ustar omeromer00000000000000"""Div. Utilities.""" from __future__ import absolute_import, unicode_literals, print_function from .encoding import default_encode import sys def emergency_dump_state(state, open_file=open, dump=None, stderr=None): """Dump message state to stdout or file.""" from pprint import pformat from tempfile import mktemp stderr = sys.stderr if stderr is None else stderr if dump is None: import pickle dump = pickle.dump persist = mktemp() print('EMERGENCY DUMP STATE TO FILE -> {0} <-'.format(persist), # noqa file=stderr) fh = open_file(persist, 'w') try: try: dump(state, fh, protocol=0) except Exception as exc: print( # noqa 'Cannot pickle state: {0!r}. Fallback to pformat.'.format(exc), file=stderr, ) fh.write(default_encode(pformat(state))) finally: fh.flush() fh.close() return persist kombu-4.1.0/kombu/utils/limits.py0000644000175000017500000000474413130603207016713 0ustar omeromer00000000000000"""Token bucket implementation for rate limiting.""" from __future__ import absolute_import, unicode_literals from collections import deque from kombu.five import monotonic __all__ = ['TokenBucket'] class TokenBucket(object): """Token Bucket Algorithm. See Also: https://en.wikipedia.org/wiki/Token_Bucket Most of this code was stolen from an entry in the ASPN Python Cookbook: https://code.activestate.com/recipes/511490/ Warning: Thread Safety: This implementation is not thread safe. Access to a `TokenBucket` instance should occur within the critical section of any multithreaded code. """ #: The rate in tokens/second that the bucket will be refilled. fill_rate = None #: Maximum number of tokens in the bucket. capacity = 1 #: Timestamp of the last time a token was taken out of the bucket. timestamp = None def __init__(self, fill_rate, capacity=1): self.capacity = float(capacity) self._tokens = capacity self.fill_rate = float(fill_rate) self.timestamp = monotonic() self.contents = deque() def add(self, item): self.contents.append(item) def pop(self): return self.contents.popleft() def clear_pending(self): self.contents.clear() def can_consume(self, tokens=1): """Check if one or more tokens can be consumed. Returns: bool: true if the number of tokens can be consumed from the bucket. If they can be consumed, a call will also consume the requested number of tokens from the bucket. Calls will only consume `tokens` (the number requested) or zero tokens -- it will never consume a partial number of tokens. """ if tokens <= self._get_tokens(): self._tokens -= tokens return True return False def expected_time(self, tokens=1): """Return estimated time of token availability. Returns: float: the time in seconds. 
""" _tokens = self._get_tokens() tokens = max(tokens, _tokens) return (tokens - _tokens) / self.fill_rate def _get_tokens(self): if self._tokens < self.capacity: now = monotonic() delta = self.fill_rate * (now - self.timestamp) self._tokens = min(self.capacity, self._tokens + delta) self.timestamp = now return self._tokens kombu-4.1.0/kombu/utils/functional.py0000644000175000017500000002465213130603207017554 0ustar omeromer00000000000000"""Functional Utilities.""" from __future__ import absolute_import, unicode_literals import random import sys import threading from collections import Iterable, Mapping, OrderedDict from itertools import count, repeat from time import sleep from vine.utils import wraps from kombu.five import ( UserDict, items, keys, python_2_unicode_compatible, string_t, ) from .encoding import safe_repr as _safe_repr __all__ = [ 'LRUCache', 'memoize', 'lazy', 'maybe_evaluate', 'is_list', 'maybe_list', 'dictfilter', ] KEYWORD_MARK = object() @python_2_unicode_compatible class ChannelPromise(object): def __init__(self, contract): self.__contract__ = contract def __call__(self): try: return self.__value__ except AttributeError: value = self.__value__ = self.__contract__() return value def __repr__(self): try: return repr(self.__value__) except AttributeError: return ''.format(id(self.__contract__)) class LRUCache(UserDict): """LRU Cache implementation using a doubly linked list to track access. Arguments: limit (int): The maximum number of keys to keep in the cache. When a new key is inserted and the limit has been exceeded, the *Least Recently Used* key will be discarded from the cache. """ def __init__(self, limit=None): self.limit = limit self.mutex = threading.RLock() self.data = OrderedDict() def __getitem__(self, key): with self.mutex: value = self[key] = self.data.pop(key) return value def update(self, *args, **kwargs): with self.mutex: data, limit = self.data, self.limit data.update(*args, **kwargs) if limit and len(data) > limit: # pop additional items in case limit exceeded for _ in range(len(data) - limit): data.popitem(last=False) def popitem(self, last=True): with self.mutex: return self.data.popitem(last) def __setitem__(self, key, value): # remove least recently used key. 
with self.mutex: if self.limit and len(self.data) >= self.limit: self.data.pop(next(iter(self.data))) self.data[key] = value def __iter__(self): return iter(self.data) def _iterate_items(self): with self.mutex: for k in self: try: yield (k, self.data[k]) except KeyError: # pragma: no cover pass iteritems = _iterate_items def _iterate_values(self): with self.mutex: for k in self: try: yield self.data[k] except KeyError: # pragma: no cover pass itervalues = _iterate_values def _iterate_keys(self): # userdict.keys in py3k calls __getitem__ with self.mutex: return keys(self.data) iterkeys = _iterate_keys def incr(self, key, delta=1): with self.mutex: # this acts as memcached does: store as a string, but return an # integer as long as it exists and we can cast it newval = int(self.data.pop(key)) + delta self[key] = str(newval) return newval def __getstate__(self): d = dict(vars(self)) d.pop('mutex') return d def __setstate__(self, state): self.__dict__ = state self.mutex = threading.RLock() if sys.version_info[0] == 3: # pragma: no cover keys = _iterate_keys values = _iterate_values items = _iterate_items else: # noqa def keys(self): return list(self._iterate_keys()) def values(self): return list(self._iterate_values()) def items(self): return list(self._iterate_items()) def memoize(maxsize=None, keyfun=None, Cache=LRUCache): """Decorator to cache function return value.""" def _memoize(fun): mutex = threading.Lock() cache = Cache(limit=maxsize) @wraps(fun) def _M(*args, **kwargs): if keyfun: key = keyfun(args, kwargs) else: key = args + (KEYWORD_MARK,) + tuple(sorted(kwargs.items())) try: with mutex: value = cache[key] except KeyError: value = fun(*args, **kwargs) _M.misses += 1 with mutex: cache[key] = value else: _M.hits += 1 return value def clear(): """Clear the cache and reset cache statistics.""" cache.clear() _M.hits = _M.misses = 0 _M.hits = _M.misses = 0 _M.clear = clear _M.original_func = fun return _M return _memoize @python_2_unicode_compatible class lazy(object): """Holds lazy evaluation. Evaluated when called or if the :meth:`evaluate` method is called. The function is re-evaluated on every call. Overloaded operations that will evaluate the promise: :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`. """ def __init__(self, fun, *args, **kwargs): self._fun = fun self._args = args self._kwargs = kwargs def __call__(self): return self.evaluate() def evaluate(self): return self._fun(*self._args, **self._kwargs) def __str__(self): return str(self()) def __repr__(self): return repr(self()) def __eq__(self, rhs): return self() == rhs def __ne__(self, rhs): return self() != rhs def __deepcopy__(self, memo): memo[id(self)] = self return self def __reduce__(self): return (self.__class__, (self._fun,), {'_args': self._args, '_kwargs': self._kwargs}) if sys.version_info[0] < 3: def __cmp__(self, rhs): if isinstance(rhs, self.__class__): return -cmp(rhs, self()) return cmp(self(), rhs) def maybe_evaluate(value): """Evaluate value only if value is a :class:`lazy` instance.""" if isinstance(value, lazy): return value.evaluate() return value def is_list(l, scalars=(Mapping, string_t), iters=(Iterable,)): """Return true if the object is iterable. Note: Returns false if object is a mapping or string.
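    Example (editor-added, illustrative):

        >>> is_list([1, 2]), is_list((1, 2))
        (True, True)
        >>> is_list('abc'), is_list({'a': 1})  # scalars by default
        (False, False)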
""" return isinstance(l, iters) and not isinstance(l, scalars or ()) def maybe_list(l, scalars=(Mapping, string_t)): """Return list of one element if ``l`` is a scalar.""" return l if l is None or is_list(l, scalars) else [l] def dictfilter(d=None, **kw): """Remove all keys from dict ``d`` whose value is :const:`None`.""" d = kw if d is None else (dict(d, **kw) if kw else d) return {k: v for k, v in items(d) if v is not None} def shufflecycle(it): it = list(it) # don't modify callers list shuffle = random.shuffle for _ in repeat(None): shuffle(it) yield it[0] def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False): cur = start * 1.0 while 1: if not stop or cur <= stop: yield cur cur += step else: if not repeatlast: break yield cur - step def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0): sum_, cur = 0, start * 1.0 while 1: if sum_ >= max: break yield cur if stop: cur = min(cur + step, stop) else: cur += step sum_ += cur def retry_over_time(fun, catch, args=[], kwargs={}, errback=None, max_retries=None, interval_start=2, interval_step=2, interval_max=30, callback=None): """Retry the function over and over until max retries is exceeded. For each retry we sleep a for a while before we try again, this interval is increased for every retry until the max seconds is reached. Arguments: fun (Callable): The function to try catch (Tuple[BaseException]): Exceptions to catch, can be either tuple or a single exception class. Keyword Arguments: args (Tuple): Positional arguments passed on to the function. kwargs (Dict): Keyword arguments passed on to the function. errback (Callable): Callback for when an exception in ``catch`` is raised. The callback must take three arguments: ``exc``, ``interval_range`` and ``retries``, where ``exc`` is the exception instance, ``interval_range`` is an iterator which return the time in seconds to sleep next, and ``retries`` is the number of previous retries. max_retries (int): Maximum number of retries before we give up. If this is not set, we will retry forever. interval_start (float): How long (in seconds) we start sleeping between retries. interval_step (float): By how much the interval is increased for each retry. interval_max (float): Maximum number of seconds to sleep between retries. """ retries = 0 interval_range = fxrange(interval_start, interval_max + interval_start, interval_step, repeatlast=True) for retries in count(): try: return fun(*args, **kwargs) except catch as exc: if max_retries and retries >= max_retries: raise if callback: callback() tts = float(errback(exc, interval_range, retries) if errback else next(interval_range)) if tts: for _ in range(int(tts)): if callback: callback() sleep(1.0) # sleep remainder after int truncation above. 
sleep(abs(int(tts) - tts)) def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'): return sep.join(fmt.format(k, _safe_repr(v)) for k, v in items(kwargs)) def reprcall(name, args=(), kwargs={}, sep=', '): return '{0}({1}{2}{3})'.format( name, sep.join(map(_safe_repr, args or ())), (args and kwargs) and sep or '', reprkwargs(kwargs, sep), ) # Compat names (before kombu 3.0) promise = lazy maybe_promise = maybe_evaluate kombu-4.1.0/kombu/utils/debug.py0000644000175000017500000000340013130603207016464 0ustar omeromer00000000000000"""Debugging support.""" from __future__ import absolute_import, unicode_literals import logging from vine.utils import wraps from kombu.five import items, python_2_unicode_compatible from kombu.log import get_logger __all__ = ['setup_logging', 'Logwrapped'] def setup_logging(loglevel=logging.DEBUG, loggers=['kombu.connection', 'kombu.channel']): """Setup logging to stdout.""" for logger in loggers: l = get_logger(logger) l.addHandler(logging.StreamHandler()) l.setLevel(loglevel) @python_2_unicode_compatible class Logwrapped(object): """Wrap all object methods, to log on call.""" __ignore = ('__enter__', '__exit__') def __init__(self, instance, logger=None, ident=None): self.instance = instance self.logger = get_logger(logger) self.ident = ident def __getattr__(self, key): meth = getattr(self.instance, key) if not callable(meth) or key in self.__ignore: return meth @wraps(meth) def __wrapped(*args, **kwargs): info = '' if self.ident: info += self.ident.format(self.instance) info += '{0.__name__}('.format(meth) if args: info += ', '.join(map(repr, args)) if kwargs: if args: info += ', ' info += ', '.join('{k}={v!r}'.format(k=key, v=value) for key, value in items(kwargs)) info += ')' self.logger.debug(info) return meth(*args, **kwargs) return __wrapped def __repr__(self): return repr(self.instance) def __dir__(self): return dir(self.instance) kombu-4.1.0/kombu/utils/uuid.py0000644000175000017500000000042213130603207016345 0ustar omeromer00000000000000"""UUID utilities.""" from __future__ import absolute_import, unicode_literals from uuid import uuid4 def uuid(_uuid=uuid4): """Generate unique id in UUID4 format. See Also: For now this is provided by :func:`uuid.uuid4`. """ return str(_uuid()) kombu-4.1.0/kombu/utils/scheduling.py0000644000175000017500000000576213130603207017540 0ustar omeromer00000000000000"""Scheduling Utilities.""" from __future__ import absolute_import, unicode_literals from itertools import count from kombu.five import python_2_unicode_compatible from .imports import symbol_by_name __all__ = [ 'FairCycle', 'priority_cycle', 'round_robin_cycle', 'sorted_cycle', ] CYCLE_ALIASES = { 'priority': 'kombu.utils.scheduling:priority_cycle', 'round_robin': 'kombu.utils.scheduling:round_robin_cycle', 'sorted': 'kombu.utils.scheduling:sorted_cycle', } @python_2_unicode_compatible class FairCycle(object): """Cycle between resources. Consume from a set of resources, where each resource gets an equal chance to be consumed from. Arguments: fun (Callable): Callback to call. resources (Sequence[Any]): List of resources. predicate (type): Exception predicate. 
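        Example (editor-added sketch; assumes only the behavior described
        above -- ``fun(resource, callback)`` is applied to each resource
        in turn):

            >>> cycle = FairCycle(lambda r, cb: cb(r), ['q1', 'q2'])
            >>> cycle.get(lambda r: r)
            'q1'
            >>> cycle.get(lambda r: r)
            'q2'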
""" def __init__(self, fun, resources, predicate=Exception): self.fun = fun self.resources = resources self.predicate = predicate self.pos = 0 def _next(self): while 1: try: resource = self.resources[self.pos] self.pos += 1 return resource except IndexError: self.pos = 0 if not self.resources: raise self.predicate() def get(self, callback, **kwargs): """Get from next resource.""" for tried in count(0): # for infinity resource = self._next() try: return self.fun(resource, callback, **kwargs) except self.predicate: # reraise when retries exchausted. if tried >= len(self.resources) - 1: raise def close(self): """Close cycle.""" pass def __repr__(self): """``repr(cycle)``.""" return ''.format( self=self, size=len(self.resources)) class round_robin_cycle(object): """Iterator that cycles between items in round-robin.""" def __init__(self, it=None): self.items = it if it is not None else [] def update(self, it): """Update items from iterable.""" self.items[:] = it def consume(self, n): """Consume n items.""" return self.items[:n] def rotate(self, last_used): """Move most recently used item to end of list.""" items = self.items try: items.append(items.pop(items.index(last_used))) except ValueError: pass return last_used class priority_cycle(round_robin_cycle): """Cycle that repeats items in order.""" def rotate(self, last_used): """Unused in this implementation.""" pass class sorted_cycle(priority_cycle): """Cycle in sorted order.""" def consume(self, n): """Consume n items.""" return sorted(self.items[:n]) def cycle_by_name(name): """Get cycle class by name.""" return symbol_by_name(name, CYCLE_ALIASES) kombu-4.1.0/kombu/utils/imports.py0000644000175000017500000000403013130603207017073 0ustar omeromer00000000000000"""Import related utilities.""" from __future__ import absolute_import, unicode_literals import importlib import sys from kombu.five import reraise, string_t def symbol_by_name(name, aliases={}, imp=None, package=None, sep='.', default=None, **kwargs): """Get symbol by qualified name. The name should be the full dot-separated path to the class:: modulename.ClassName Example:: celery.concurrency.processes.TaskPool ^- class name or using ':' to separate module and symbol:: celery.concurrency.processes:TaskPool If `aliases` is provided, a dict containing short name/long name mappings, the name is looked up in the aliases first. Examples: >>> symbol_by_name('celery.concurrency.processes.TaskPool') >>> symbol_by_name('default', { ... 'default': 'celery.concurrency.processes.TaskPool'}) # Does not try to look up non-string names. 
>>> from celery.concurrency.processes import TaskPool >>> symbol_by_name(TaskPool) is TaskPool True """ if imp is None: imp = importlib.import_module if not isinstance(name, string_t): return name # already a class name = aliases.get(name) or name sep = ':' if ':' in name else sep module_name, _, cls_name = name.rpartition(sep) if not module_name: cls_name, module_name = None, package if package else cls_name try: try: module = imp(module_name, package=package, **kwargs) except ValueError as exc: reraise(ValueError, ValueError("Couldn't import {0!r}: {1}".format(name, exc)), sys.exc_info()[2]) return getattr(module, cls_name) if cls_name else module except (ImportError, AttributeError): if default is None: raise return default kombu-4.1.0/kombu/utils/json.py0000644000175000017500000000573113130603207016360 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """JSON Serialization Utilities.""" from __future__ import absolute_import, unicode_literals import datetime import decimal import json as stdjson import sys import uuid from kombu.five import buffer_t, text_t, bytes_t try: from django.utils.functional import Promise as DjangoPromise except ImportError: # pragma: no cover class DjangoPromise(object): # noqa """Dummy object.""" try: import simplejson as json _json_extra_kwargs = {'use_decimal': False} except ImportError: # pragma: no cover import json # noqa _json_extra_kwargs = {} # noqa class _DecodeError(Exception): # noqa pass else: from simplejson.decoder import JSONDecodeError as _DecodeError IS_PY3 = sys.version_info[0] == 3 _encoder_cls = type(json._default_encoder) _default_encoder = None # ... set to JSONEncoder below. class JSONEncoder(_encoder_cls): """Kombu custom json encoder.""" def default(self, o, dates=(datetime.datetime, datetime.date), times=(datetime.time,), textual=(decimal.Decimal, uuid.UUID, DjangoPromise), isinstance=isinstance, datetime=datetime.datetime, text_t=text_t): reducer = getattr(o, '__json__', None) if reducer is not None: return reducer() else: if isinstance(o, dates): if not isinstance(o, datetime): o = datetime(o.year, o.month, o.day, 0, 0, 0, 0) r = o.isoformat() if r.endswith("+00:00"): r = r[:-6] + "Z" return r elif isinstance(o, times): return o.isoformat() elif isinstance(o, textual): return text_t(o) return super(JSONEncoder, self).default(o) _default_encoder = JSONEncoder def dumps(s, _dumps=json.dumps, cls=None, default_kwargs=_json_extra_kwargs, **kwargs): """Serialize object to json string.""" return _dumps(s, cls=cls or _default_encoder, **dict(default_kwargs, **kwargs)) def loads(s, _loads=json.loads, decode_bytes=IS_PY3): """Deserialize json from string.""" # None of the json implementations supports decoding from # a buffer/memoryview, or even reading from a stream # (load is just loads(fp.read())) # but this is Python, we love copying strings, preferably many times # over. Note that pickle does support buffer/memoryview # if isinstance(s, memoryview): s = s.tobytes().decode('utf-8') elif isinstance(s, bytearray): s = s.decode('utf-8') elif decode_bytes and isinstance(s, bytes_t): s = s.decode('utf-8') elif isinstance(s, buffer_t): s = text_t(s) # ... 
awwwwwww :( try: return _loads(s) except _DecodeError: # catch "Unpaired high surrogate" error return stdjson.loads(s) kombu-4.1.0/kombu/async/0000755000175000017500000000000013134154263015013 5ustar omeromer00000000000000kombu-4.1.0/kombu/async/timer.py0000644000175000017500000001473113130603207016504 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Timer scheduling Python callbacks.""" from __future__ import absolute_import, unicode_literals import heapq import sys from collections import namedtuple from datetime import datetime from functools import total_ordering from weakref import proxy as weakrefproxy from vine.utils import wraps from kombu.five import monotonic, python_2_unicode_compatible from kombu.log import get_logger from time import time as _time try: from pytz import utc except ImportError: # pragma: no cover utc = None __all__ = ['Entry', 'Timer', 'to_timestamp'] logger = get_logger(__name__) DEFAULT_MAX_INTERVAL = 2 EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc) IS_PYPY = hasattr(sys, 'pypy_version_info') scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry')) def to_timestamp(d, default_timezone=utc, time=monotonic): """Convert datetime to timestamp. If `d` is already a timestamp, then that will be used. """ if isinstance(d, datetime): if d.tzinfo is None: d = d.replace(tzinfo=default_timezone) diff = _time() - time() return max((d - EPOCH).total_seconds() - diff, 0) return d @total_ordering @python_2_unicode_compatible class Entry(object): """Schedule Entry.""" if not IS_PYPY: # pragma: no cover __slots__ = ( 'fun', 'args', 'kwargs', 'tref', 'canceled', '_last_run', '__weakref__', ) def __init__(self, fun, args=None, kwargs=None): self.fun = fun self.args = args or [] self.kwargs = kwargs or {} self.tref = weakrefproxy(self) self._last_run = None self.canceled = False def __call__(self): return self.fun(*self.args, **self.kwargs) def cancel(self): try: self.tref.canceled = True except ReferenceError: # pragma: no cover pass def __repr__(self): return '<TimerEntry: {0}(*{1!r}, **{2!r})>'.format( self.fun.__name__, self.args, self.kwargs) # must not use hash() to order entries def __lt__(self, other): return id(self) < id(other) @property def cancelled(self): return self.canceled @cancelled.setter def cancelled(self, value): self.canceled = value class Timer(object): """Async timer implementation.""" Entry = Entry on_error = None def __init__(self, max_interval=None, on_error=None, **kwargs): self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL) self.on_error = on_error or self.on_error self._queue = [] def __enter__(self): return self def __exit__(self, *exc_info): self.stop() def call_at(self, eta, fun, args=(), kwargs={}, priority=0): return self.enter_at(self.Entry(fun, args, kwargs), eta, priority) def call_after(self, secs, fun, args=(), kwargs={}, priority=0): return self.enter_after(secs, self.Entry(fun, args, kwargs), priority) def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0): tref = self.Entry(fun, args, kwargs) @wraps(fun) def _reschedules(*args, **kwargs): last, now = tref._last_run, monotonic() lsince = (now - tref._last_run) if last else secs try: if lsince and lsince >= secs: tref._last_run = now return fun(*args, **kwargs) finally: if not tref.canceled: last = tref._last_run next = secs - (now - last) if last else secs self.enter_after(next, tref, priority) tref.fun = _reschedules tref._last_run = None return self.enter_after(secs, tref, priority) def enter_at(self, entry, eta=None, priority=0, time=monotonic): """Enter function into the scheduler. Arguments: entry (~kombu.async.timer.Entry): Item to enter. eta (datetime.datetime): Scheduled time. priority (int): Unused. """ if eta is None: eta = time() if isinstance(eta, datetime): try: eta = to_timestamp(eta) except Exception as exc: if not self.handle_error(exc): raise return return self._enter(eta, priority, entry) def enter_after(self, secs, entry, priority=0, time=monotonic): return self.enter_at(entry, time() + secs, priority) def _enter(self, eta, priority, entry, push=heapq.heappush): push(self._queue, scheduled(eta, priority, entry)) return entry def apply_entry(self, entry): try: entry() except Exception as exc: if not self.handle_error(exc): logger.error('Error in timer: %r', exc, exc_info=True) def handle_error(self, exc_info): if self.on_error: self.on_error(exc_info) return True def stop(self): pass def __iter__(self, min=min, nowfun=monotonic, pop=heapq.heappop, push=heapq.heappush): """Iterate over schedule. This iterator yields a tuple of ``(wait_seconds, entry)``, where if entry is :const:`None` the caller should wait for ``wait_seconds`` until it polls the schedule again.
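        Example (editor-added, illustrative only; matches the
        ``delay, entry = next(...)`` unpacking used by ``Hub.fire_timers``
        later in this package):

            >>> timer = Timer()
            >>> entry = timer.call_after(10.0, lambda: None)
            >>> wait_seconds, entry_ = next(iter(timer))
            >>> entry_ is None and 0 < wait_seconds <= timer.max_interval
            True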
""" max_interval = self.max_interval queue = self._queue while 1: if queue: eventA = queue[0] now, eta = nowfun(), eventA[0] if now < eta: yield min(eta - now, max_interval), None else: eventB = pop(queue) if eventB is eventA: entry = eventA[2] if not entry.canceled: yield None, entry continue else: push(queue, eventB) else: yield None, None def clear(self): self._queue[:] = [] # atomic, without creating a new list. def cancel(self, tref): tref.cancel() def __len__(self): return len(self._queue) def __nonzero__(self): return True @property def queue(self, _pop=heapq.heappop): """Snapshot of underlying datastructure.""" events = list(self._queue) return [_pop(v) for v in [events] * len(events)] @property def schedule(self): return self kombu-4.1.0/kombu/async/aws/0000755000175000017500000000000013134154263015605 5ustar omeromer00000000000000kombu-4.1.0/kombu/async/aws/connection.py0000644000175000017500000001765113130603207020321 0ustar omeromer00000000000000# * coding: utf8 * """Amazon AWS Connection.""" from __future__ import absolute_import, unicode_literals from vine import promise, transform from kombu.async.aws.ext import AWSRequest, get_response from kombu.async.http import Headers, Request, get_client from kombu.five import items, python_2_unicode_compatible import io try: # pragma: no cover from email import message_from_bytes from email.mime.message import MIMEMessage # py3 def message_from_headers(hdr): # noqa bs = "\r\n".join("{}: {}".format(*h) for h in hdr) return message_from_bytes(bs.encode()) except ImportError: # pragma: no cover from mimetools import Message as MIMEMessage # noqa # py2 def message_from_headers(hdr): # noqa return io.BytesIO(b'\r\n'.join( b'{0}: {1}'.format(*h) for h in hdr )) __all__ = [ 'AsyncHTTPSConnection', 'AsyncConnection', ] @python_2_unicode_compatible class AsyncHTTPResponse(object): """Async HTTP Response.""" def __init__(self, response): self.response = response self._msg = None self.version = 10 def read(self, *args, **kwargs): return self.response.body def getheader(self, name, default=None): return self.response.headers.get(name, default) def getheaders(self): return list(items(self.response.headers)) @property def msg(self): if self._msg is None: self._msg = MIMEMessage(message_from_headers(self.getheaders())) return self._msg @property def status(self): return self.response.code @property def reason(self): if self.response.error: return self.response.error.message return '' def __repr__(self): return repr(self.response) @python_2_unicode_compatible class AsyncHTTPSConnection(object): """Async HTTP Connection.""" Request = Request Response = AsyncHTTPResponse method = 'GET' path = '/' body = None default_ports = {'http': 80, 'https': 443} def __init__(self, strict=None, timeout=20.0, http_client=None): self.headers = [] self.timeout = timeout self.strict = strict self.http_client = http_client or get_client() def request(self, method, path, body=None, headers=None): self.path = path self.method = method if body is not None: try: read = body.read except AttributeError: self.body = body else: self.body = read() if headers is not None: self.headers.extend(list(items(headers))) def getrequest(self): headers = Headers(self.headers) return self.Request(self.path, method=self.method, headers=headers, body=self.body, connect_timeout=self.timeout, request_timeout=self.timeout, validate_cert=False) def getresponse(self, callback=None): request = self.getrequest() request.then(transform(self.Response, callback)) return 
self.http_client.add_request(request) def set_debuglevel(self, level): pass def connect(self): pass def close(self): pass def putrequest(self, method, path): self.method = method self.path = path def putheader(self, header, value): self.headers.append((header, value)) def endheaders(self): pass def send(self, data): if self.body: self.body += data else: self.body = data def __repr__(self): return '<AsyncHTTPConnection: {0!r}>'.format(self.getrequest()) class AsyncConnection(object): """Async AWS Connection.""" def __init__(self, sqs_connection, http_client=None, **kwargs): # noqa self.sqs_connection = sqs_connection self._httpclient = http_client or get_client() def get_http_connection(self): return AsyncHTTPSConnection(http_client=self._httpclient) def _mexe(self, request, sender=None, callback=None): callback = callback or promise() conn = self.get_http_connection() if callable(sender): sender(conn, request.method, request.path, request.body, request.headers, callback) else: conn.request(request.method, request.url, request.body, request.headers) conn.getresponse(callback=callback) return callback class AsyncAWSQueryConnection(AsyncConnection): """Async AWS Query Connection.""" def __init__(self, sqs_connection, http_client=None, http_client_params=None, **kwargs): if not http_client_params: http_client_params = {} AsyncConnection.__init__(self, sqs_connection, http_client, **http_client_params) def make_request(self, operation, params_, path, verb, callback=None): # noqa params = params_.copy() if operation: params['Action'] = operation signer = self.sqs_connection._request_signer # noqa # defaults for non-get signing_type = 'standard' param_payload = {'data': params} if verb.lower() == 'get': # query-based opts signing_type = 'presignurl' param_payload = {'params': params} request = AWSRequest(method=verb, url=path, **param_payload) signer.sign(operation, request, signing_type=signing_type) prepared_request = request.prepare() return self._mexe(prepared_request, callback=callback) def get_list(self, operation, params, markers, path='/', parent=None, verb='POST', callback=None): # noqa return self.make_request( operation, params, path, verb, callback=transform( self._on_list_ready, callback, parent or self, markers, operation ), ) def get_object(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa return self.make_request( operation, params, path, verb, callback=transform( self._on_obj_ready, callback, parent or self, operation ), ) def get_status(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa return self.make_request( operation, params, path, verb, callback=transform( self._on_status_ready, callback, parent or self, operation ), ) def _on_list_ready(self, parent, markers, operation, response): # noqa service_model = self.sqs_connection.meta.service_model if response.status == 200: _, parsed = get_response( service_model.operation_model(operation), response.response ) return parsed else: raise self._for_status(response, response.read()) def _on_obj_ready(self, parent, operation, response): # noqa service_model = self.sqs_connection.meta.service_model if response.status == 200: _, parsed = get_response( service_model.operation_model(operation), response.response ) return parsed else: raise self._for_status(response, response.read()) def _on_status_ready(self, parent, operation, response): # noqa service_model = self.sqs_connection.meta.service_model if response.status == 200: httpres, _ = get_response( service_model.operation_model(operation),
response.response ) return httpres.code else: raise self._for_status(response, response.read()) def _for_status(self, response, body): context = 'Empty body' if not body else 'HTTP Error' return Exception("Request {} HTTP {} {} ({})".format( context, response.status, response.reason, body )) kombu-4.1.0/kombu/async/aws/__init__.py0000644000175000017500000000054613130603207017714 0ustar omeromer00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """Return async connection to Amazon SQS.""" from .sqs.connection import AsyncSQSConnection return AsyncSQSConnection( aws_access_key_id, aws_secret_access_key, **kwargs ) kombu-4.1.0/kombu/async/aws/ext.py0000644000175000017500000000107713130603207016755 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Amazon boto3 interface.""" from __future__ import absolute_import, unicode_literals try: import boto3 from botocore import exceptions from botocore.awsrequest import AWSRequest from botocore.response import get_response except ImportError: boto3 = None class _void(object): pass class BotoCoreError(Exception): pass exceptions = _void() exceptions.BotoCoreError = BotoCoreError AWSRequest = _void() get_response = _void() __all__ = [ 'exceptions', 'AWSRequest', 'get_response' ] kombu-4.1.0/kombu/async/aws/sqs/0000755000175000017500000000000013134154263016413 5ustar omeromer00000000000000kombu-4.1.0/kombu/async/aws/sqs/connection.py0000644000175000017500000001557013130603207021125 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Amazon SQS Connection.""" from __future__ import absolute_import, unicode_literals from vine import transform from kombu.async.aws.connection import AsyncAWSQueryConnection from .ext import boto3 from .message import AsyncMessage from .queue import AsyncQueue __all__ = ['AsyncSQSConnection'] class AsyncSQSConnection(AsyncAWSQueryConnection): """Async SQS Connection.""" def __init__(self, sqs_connection, debug=0, region=None, **kwargs): if boto3 is None: raise ImportError('boto3 is not installed') AsyncAWSQueryConnection.__init__( self, sqs_connection, region_name=region, debug=debug, **kwargs ) def create_queue(self, queue_name, visibility_timeout=None, callback=None): params = {'QueueName': queue_name} if visibility_timeout: params['DefaultVisibilityTimeout'] = format( visibility_timeout, 'd', ) return self.get_object('CreateQueue', params, callback=callback) def delete_queue(self, queue, force_deletion=False, callback=None): return self.get_status('DeleteQueue', None, queue.id, callback=callback) def get_queue_url(self, queue): res = self.sqs_connection.get_queue_url(QueueName=queue) return res['QueueUrl'] def get_queue_attributes(self, queue, attribute='All', callback=None): return self.get_object( 'GetQueueAttributes', {'AttributeName': attribute}, queue.id, callback=callback, ) def set_queue_attribute(self, queue, attribute, value, callback=None): return self.get_status( 'SetQueueAttribute', {'Attribute.Name': attribute, 'Attribute.Value': value}, queue.id, callback=callback, ) def receive_message(self, queue, number_messages=1, visibility_timeout=None, attributes=None, wait_time_seconds=None, callback=None): params = {'MaxNumberOfMessages': number_messages} if visibility_timeout: params['VisibilityTimeout'] = visibility_timeout if attributes: attrs = {} for idx, attr in enumerate(attributes): attrs['AttributeName.' 
+ str(idx + 1)] = attr params.update(attrs) if wait_time_seconds is not None: params['WaitTimeSeconds'] = wait_time_seconds queue_url = self.get_queue_url(queue) return self.get_list( 'ReceiveMessage', params, [('Message', AsyncMessage)], queue_url, callback=callback, parent=queue, ) def delete_message(self, queue, receipt_handle, callback=None): return self.delete_message_from_handle( queue, receipt_handle, callback, ) def delete_message_batch(self, queue, messages, callback=None): params = {} for i, m in enumerate(messages): prefix = 'DeleteMessageBatchRequestEntry.{0}'.format(i + 1) params.update({ '{0}.Id'.format(prefix): m.id, '{0}.ReceiptHandle'.format(prefix): m.receipt_handle, }) return self.get_object( 'DeleteMessageBatch', params, queue.id, verb='POST', callback=callback, ) def delete_message_from_handle(self, queue, receipt_handle, callback=None): return self.get_status( 'DeleteMessage', {'ReceiptHandle': receipt_handle}, queue, callback=callback, ) def send_message(self, queue, message_content, delay_seconds=None, callback=None): params = {'MessageBody': message_content} if delay_seconds: params['DelaySeconds'] = int(delay_seconds) return self.get_object( 'SendMessage', params, queue.id, verb='POST', callback=callback, ) def send_message_batch(self, queue, messages, callback=None): params = {} for i, msg in enumerate(messages): prefix = 'SendMessageBatchRequestEntry.{0}'.format(i + 1) params.update({ '{0}.Id'.format(prefix): msg[0], '{0}.MessageBody'.format(prefix): msg[1], '{0}.DelaySeconds'.format(prefix): msg[2], }) return self.get_object( 'SendMessageBatch', params, queue.id, verb='POST', callback=callback, ) def change_message_visibility(self, queue, receipt_handle, visibility_timeout, callback=None): return self.get_status( 'ChangeMessageVisibility', {'ReceiptHandle': receipt_handle, 'VisibilityTimeout': visibility_timeout}, queue.id, callback=callback, ) def change_message_visibility_batch(self, queue, messages, callback=None): params = {} for i, t in enumerate(messages): pre = 'ChangeMessageVisibilityBatchRequestEntry.{0}'.format(i + 1) params.update({ '{0}.Id'.format(pre): t[0].id, '{0}.ReceiptHandle'.format(pre): t[0].receipt_handle, '{0}.VisibilityTimeout'.format(pre): t[1], }) return self.get_object( 'ChangeMessageVisibilityBatch', params, queue.id, verb='POST', callback=callback, ) def get_all_queues(self, prefix='', callback=None): params = {} if prefix: params['QueueNamePrefix'] = prefix return self.get_list( 'ListQueues', params, [('QueueUrl', AsyncQueue)], callback=callback, ) def get_queue(self, queue_name, callback=None): # TODO Does not support owner_acct_id argument return self.get_all_queues( queue_name, transform(self._on_queue_ready, callback, queue_name), ) lookup = get_queue def _on_queue_ready(self, name, queues): return next( (q for q in queues if q.url.endswith(name)), None, ) def get_dead_letter_source_queues(self, queue, callback=None): return self.get_list( 'ListDeadLetterSourceQueues', {'QueueUrl': queue.url}, [('QueueUrl', AsyncQueue)], callback=callback, ) def add_permission(self, queue, label, aws_account_id, action_name, callback=None): return self.get_status( 'AddPermission', {'Label': label, 'AWSAccountId': aws_account_id, 'ActionName': action_name}, queue.id, callback=callback, ) def remove_permission(self, queue, label, callback=None): return self.get_status( 'RemovePermission', {'Label': label}, queue.id, callback=callback, ) kombu-4.1.0/kombu/async/aws/sqs/__init__.py0000644000175000017500000000000013130603207020503 0ustar 
omeromer00000000000000kombu-4.1.0/kombu/async/aws/sqs/message.py0000644000175000017500000000165113130603207020405 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Amazon SQS message implementation.""" from __future__ import absolute_import, unicode_literals import base64 from kombu.message import Message from kombu.utils.encoding import str_to_bytes class BaseAsyncMessage(Message): """Base class for messages received on async client.""" class AsyncRawMessage(BaseAsyncMessage): """Raw Message.""" class AsyncMessage(BaseAsyncMessage): """Serialized message.""" def encode(self, value): """Encode the value using Base64 encoding.""" return base64.b64encode(str_to_bytes(value)).decode() def __getitem__(self, item): """Support Boto3-style access on a message.""" if item == 'ReceiptHandle': return self.receipt_handle elif item == 'Body': return self.get_body() elif item == 'queue': return self.queue else: raise KeyError(item) kombu-4.1.0/kombu/async/aws/sqs/ext.py0000644000175000017500000000026013130603207017556 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Amazon SQS boto3 interface.""" from __future__ import absolute_import, unicode_literals try: import boto3 except ImportError: boto3 = None kombu-4.1.0/kombu/async/aws/sqs/queue.py0000644000175000017500000001054113130603207020103 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Amazon SQS queue implementation.""" from __future__ import absolute_import, unicode_literals from vine import transform from .message import AsyncMessage __all__ = ['AsyncQueue'] def list_first(rs): """Get the only item in a list, or None if the list does not contain exactly one item.""" return rs[0] if len(rs) == 1 else None class AsyncQueue(object): """Async SQS Queue.""" def __init__(self, connection=None, url=None, message_class=AsyncMessage): self.connection = connection self.url = url self.message_class = message_class self.visibility_timeout = None def _NA(self, *args, **kwargs): raise NotImplementedError() count_slow = dump = save_to_file = save_to_filename = save = \ save_to_s3 = load_from_s3 = load_from_file = load_from_filename = \ load = clear = _NA def get_attributes(self, attributes='All', callback=None): return self.connection.get_queue_attributes( self, attributes, callback, ) def set_attribute(self, attribute, value, callback=None): return self.connection.set_queue_attribute( self, attribute, value, callback, ) def get_timeout(self, callback=None, _attr='VisibilityTimeout'): return self.get_attributes( _attr, transform( self._coerce_field_value, callback, _attr, int, ), ) def _coerce_field_value(self, key, type, response): return type(response[key]) def set_timeout(self, visibility_timeout, callback=None): return self.set_attribute( 'VisibilityTimeout', visibility_timeout, transform( self._on_timeout_set, callback, ) ) def _on_timeout_set(self, visibility_timeout): if visibility_timeout: self.visibility_timeout = visibility_timeout return self.visibility_timeout def add_permission(self, label, aws_account_id, action_name, callback=None): return self.connection.add_permission( self, label, aws_account_id, action_name, callback, ) def remove_permission(self, label, callback=None): return self.connection.remove_permission(self, label, callback) def read(self, visibility_timeout=None, wait_time_seconds=None, callback=None): return self.get_messages( 1, visibility_timeout, wait_time_seconds=wait_time_seconds, callback=transform(list_first, callback), ) def write(self, message, delay_seconds=None, callback=None): return self.connection.send_message( self,
message.get_body_encoded(), delay_seconds, callback=transform(self._on_message_sent, callback, message), ) def write_batch(self, messages, callback=None): return self.connection.send_message_batch( self, messages, callback=callback, ) def _on_message_sent(self, orig_message, new_message): orig_message.id = new_message.id orig_message.md5 = new_message.md5 return new_message def get_messages(self, num_messages=1, visibility_timeout=None, attributes=None, wait_time_seconds=None, callback=None): return self.connection.receive_message( self, number_messages=num_messages, visibility_timeout=visibility_timeout, attributes=attributes, wait_time_seconds=wait_time_seconds, callback=callback, ) def delete_message(self, message, callback=None): return self.connection.delete_message(self, message, callback) def delete_message_batch(self, messages, callback=None): return self.connection.delete_message_batch( self, messages, callback=callback, ) def change_message_visibility_batch(self, messages, callback=None): return self.connection.change_message_visibility_batch( self, messages, callback=callback, ) def delete(self, callback=None): return self.connection.delete_queue(self, callback=callback) def count(self, page_size=10, vtimeout=10, callback=None, _attr='ApproximateNumberOfMessages'): return self.get_attributes( _attr, callback=transform( self._coerce_field_value, callback, _attr, int, ), ) kombu-4.1.0/kombu/async/__init__.py0000644000175000017500000000043213130603207017114 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Event loop.""" from __future__ import absolute_import, unicode_literals from .hub import Hub, get_event_loop, set_event_loop from kombu.utils.eventio import READ, WRITE, ERR __all__ = ['READ', 'WRITE', 'ERR', 'Hub', 'get_event_loop', 'set_event_loop'] kombu-4.1.0/kombu/async/semaphore.py0000644000175000017500000000613213130603207017343 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Semaphores and concurrency primitives.""" from __future__ import absolute_import, unicode_literals from collections import deque from kombu.five import python_2_unicode_compatible __all__ = ['DummyLock', 'LaxBoundedSemaphore'] @python_2_unicode_compatible class LaxBoundedSemaphore(object): """Asynchronous Bounded Semaphore. Lax means that the value will stay within the specified range even if released more times than it was acquired. Example: >>> from future import print_statement as printf # ^ ignore: just fooling stupid pyflakes >>> x = LaxBoundedSemaphore(2) >>> x.acquire(printf, 'HELLO 1') HELLO 1 >>> x.acquire(printf, 'HELLO 2') HELLO 2 >>> x.acquire(printf, 'HELLO 3') >>> x._waiting # private, do not access directly [print, ('HELLO 3',)] >>> x.release() HELLO 3 """ def __init__(self, value): self.initial_value = self.value = value self._waiting = deque() self._add_waiter = self._waiting.append self._pop_waiter = self._waiting.popleft def acquire(self, callback, *partial_args, **partial_kwargs): """Acquire semaphore. This will immediately apply ``callback`` if the resource is available, otherwise the callback is suspended until the semaphore is released. Arguments: callback (Callable): The callback to apply. *partial_args (Any): partial arguments to callback. """ value = self.value if value <= 0: self._add_waiter((callback, partial_args, partial_kwargs)) return False else: self.value = max(value - 1, 0) callback(*partial_args, **partial_kwargs) return True def release(self): """Release semaphore.
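        Example (editor-added, illustrative continuation of the class
        example above -- releasing applies the first waiting callback):

            >>> from __future__ import print_function
            >>> x = LaxBoundedSemaphore(1)
            >>> x.acquire(print, 'A')   # applied immediately
            A
            True
            >>> x.acquire(print, 'B')   # suspended until release
            False
            >>> x.release()             # applies the first waiter (FIFO)
            B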
Note: If there are any waiters this will apply the first waiter that is waiting for the resource (FIFO order). """ try: waiter, args, kwargs = self._pop_waiter() except IndexError: self.value = min(self.value + 1, self.initial_value) else: waiter(*args, **kwargs) def grow(self, n=1): """Change the size of the semaphore to accept more users.""" self.initial_value += n self.value += n [self.release() for _ in range(n)] def shrink(self, n=1): """Change the size of the semaphore to accept fewer users.""" self.initial_value = max(self.initial_value - n, 0) self.value = max(self.value - n, 0) def clear(self): """Reset the semaphore, which also wipes out any waiting callbacks.""" self._waiting.clear() self.value = self.initial_value def __repr__(self): return '<{0} at {1:#x} value:{2} waiting:{3}>'.format( self.__class__.__name__, id(self), self.value, len(self._waiting), ) class DummyLock(object): """Pretending to be a lock.""" def __enter__(self): return self def __exit__(self, *exc_info): pass kombu-4.1.0/kombu/async/debug.py0000644000175000017500000000352613130603207016452 0ustar omeromer00000000000000"""Event-loop debugging tools.""" from __future__ import absolute_import, unicode_literals from kombu.five import items, string_t from kombu.utils.eventio import READ, WRITE, ERR from kombu.utils.functional import reprcall def repr_flag(flag): """Return description of event loop flag.""" return '{0}{1}{2}'.format('R' if flag & READ else '', 'W' if flag & WRITE else '', '!' if flag & ERR else '') def _rcb(obj): if obj is None: return '' if isinstance(obj, string_t): return obj if isinstance(obj, tuple): cb, args = obj return reprcall(cb.__name__, args=args) return obj.__name__ def repr_active(h): """Return description of active readers and writers.""" return ', '.join(repr_readers(h) + repr_writers(h)) def repr_events(h, events): """Return description of events returned by poll.""" return ', '.join( '{0}({1})->{2}'.format( _rcb(callback_for(h, fd, fl, '(GONE)')), fd, repr_flag(fl), ) for fd, fl in events ) def repr_readers(h): """Return description of pending readers.""" return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(READ | ERR)) for fd, cb in items(h.readers)] def repr_writers(h): """Return description of pending writers.""" return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(WRITE)) for fd, cb in items(h.writers)] def callback_for(h, fd, flag, *default): """Return the callback used for hub+fd+flag.""" try: if flag & READ: return h.readers[fd] if flag & WRITE: if fd in h.consolidate: return h.consolidate_callback return h.writers[fd] except KeyError: if default: return default[0] raise kombu-4.1.0/kombu/async/http/0000755000175000017500000000000013134154263015772 5ustar omeromer00000000000000kombu-4.1.0/kombu/async/http/base.py0000644000175000017500000002234213130603207017252 0ustar omeromer00000000000000"""Base async HTTP client implementation.""" from __future__ import absolute_import, unicode_literals import sys from vine import Thenable, promise, maybe_promise from kombu.exceptions import HttpError from kombu.five import items, python_2_unicode_compatible from kombu.utils.compat import coro from kombu.utils.encoding import bytes_to_str from kombu.utils.functional import maybe_list, memoize try: # pragma: no cover from http.client import responses except ImportError: from httplib import responses # noqa __all__ = ['Headers', 'Response', 'Request'] PYPY = hasattr(sys, 'pypy_version_info') @memoize(maxsize=1000) def normalize_header(key): return '-'.join(p.capitalize() for p in
key.split('-')) class Headers(dict): """Represents a mapping of HTTP headers.""" # TODO: This is just a regular dict and will not perform normalization # when looking up keys etc. #: Set when all of the headers have been read. complete = False #: Internal attribute used to keep track of continuation lines. _prev_key = None @Thenable.register @python_2_unicode_compatible class Request(object): """A HTTP Request. Arguments: url (str): The URL to request. method (str): The HTTP method to use (defaults to ``GET``). Keyword Arguments: headers (Dict, ~kombu.async.http.Headers): Optional headers for this request. body (str): Optional body for this request. connect_timeout (float): Connection timeout in float seconds. Default is 30.0. request_timeout (float): Time in float seconds before the request times out. Default is 30.0. follow_redirects (bool): Specify if the client should follow redirects. Enabled by default. max_redirects (int): Maximum number of redirects (default 6). use_gzip (bool): Allow the server to use gzip compression. Enabled by default. validate_cert (bool): Set to true if the server certificate should be verified when performing ``https://`` requests. Enabled by default. auth_username (str): Username for HTTP authentication. auth_password (str): Password for HTTP authentication. auth_mode (str): Type of HTTP authentication (``basic`` or ``digest``). user_agent (str): Custom user agent for this request. network_interface (str): Network interface to use for this request. on_ready (Callable): Callback to be called when the response has been received. Must accept single ``response`` argument. on_stream (Callable): Optional callback to be called every time body content has been read from the socket. If specified then the response body and buffer attributes will not be available. on_timeout (Callable): Optional callback to be called if the request times out. on_header (Callable): Optional callback to be called for every header line received from the server. The signature is ``(headers, line)`` and note that if you want ``response.headers`` to be populated then your callback needs to also call ``client.on_header(headers, line)``. on_prepare (Callable): Optional callback that is implementation specific (e.g. curl client will pass the ``curl`` instance to this callback). proxy_host (str): Optional proxy host. Note that a ``proxy_port`` must also be provided or a :exc:`ValueError` will be raised. proxy_username (str): Optional username to use when logging in to the proxy. proxy_password (str): Optional password to use when authenticating with the proxy server. ca_certs (str): Custom CA certificates file to use. client_key (str): Optional filename for client SSL key. client_cert (str): Optional filename for client SSL certificate.
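        Example (editor-added, illustrative; shows construction only --
        no network traffic is performed here):

            >>> req = Request('https://example.com/', method='GET',
            ...               request_timeout=10.0,
            ...               on_ready=lambda response: None)
            >>> req.method, req.follow_redirects
            ('GET', True)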
""" body = user_agent = network_interface = \ auth_username = auth_password = auth_mode = \ proxy_host = proxy_port = proxy_username = proxy_password = \ ca_certs = client_key = client_cert = None connect_timeout = 30.0 request_timeout = 30.0 follow_redirects = True max_redirects = 6 use_gzip = True validate_cert = True if not PYPY: # pragma: no cover __slots__ = ('url', 'method', 'on_ready', 'on_timeout', 'on_stream', 'on_prepare', 'on_header', 'headers', '__weakref__', '__dict__') def __init__(self, url, method='GET', on_ready=None, on_timeout=None, on_stream=None, on_prepare=None, on_header=None, headers=None, **kwargs): self.url = url self.method = method or self.method self.on_ready = maybe_promise(on_ready) or promise() self.on_timeout = maybe_promise(on_timeout) self.on_stream = maybe_promise(on_stream) self.on_prepare = maybe_promise(on_prepare) self.on_header = maybe_promise(on_header) if kwargs: for k, v in items(kwargs): setattr(self, k, v) if not isinstance(headers, Headers): headers = Headers(headers or {}) self.headers = headers def then(self, callback, errback=None): self.on_ready.then(callback, errback) def __repr__(self): return ''.format(self) class Response(object): """HTTP Response. Arguments: request (~kombu.async.http.Request): See :attr:`request`. code (int): See :attr:`code`. headers (~kombu.async.http.Headers): See :attr:`headers`. buffer (bytes): See :attr:`buffer` effective_url (str): See :attr:`effective_url`. status (str): See :attr:`status`. Attributes: request (~kombu.async.http.Request): object used to get this response. code (int): HTTP response code (e.g. 200, 404, or 500). headers (~kombu.async.http.Headers): HTTP headers for this response. buffer (bytes): Socket read buffer. effective_url (str): The destination url for this request after following redirects. error (Exception): Error instance if the request resulted in a HTTP error code. status (str): Human equivalent of :attr:`code`, e.g. ``OK``, `Not found`, or 'Internal Server Error'. """ if not PYPY: # pragma: no cover __slots__ = ('request', 'code', 'headers', 'buffer', 'effective_url', 'error', 'status', '_body', '__weakref__') def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, status=None): self.request = request self.code = code self.headers = headers if headers is not None else Headers() self.buffer = buffer self.effective_url = effective_url or request.url self._body = None self.status = status or responses.get(self.code, 'Unknown') self.error = error if self.error is None and (self.code < 200 or self.code > 299): self.error = HttpError(self.code, self.status, self) def raise_for_error(self): """Raise if the request resulted in an HTTP error code. Raises: :class:`~kombu.exceptions.HttpError` """ if self.error: raise self.error @property def body(self): """The full contents of the response body. Note: Accessing this propery will evaluate the buffer and subsequent accesses will be cached. 
""" if self._body is None: if self.buffer is not None: self._body = self.buffer.getvalue() return self._body # these are for compatibility with Requests @property def status_code(self): return self.code @property def content(self): return self.body @coro def header_parser(keyt=normalize_header): while 1: (line, headers) = yield if line.startswith('HTTP/'): continue elif not line: headers.complete = True continue elif line[0].isspace(): pkey = headers._prev_key headers[pkey] = ' '.join([headers.get(pkey) or '', line.lstrip()]) else: key, value = line.split(':', 1) key = headers._prev_key = keyt(key) headers[key] = value.strip() class BaseClient(object): Headers = Headers Request = Request Response = Response def __init__(self, hub, **kwargs): self.hub = hub self._header_parser = header_parser() def perform(self, request, **kwargs): for req in maybe_list(request) or []: if not isinstance(req, self.Request): req = self.Request(req, **kwargs) self.add_request(req) def add_request(self, request): raise NotImplementedError('must implement add_request') def close(self): pass def on_header(self, headers, line): try: self._header_parser.send((bytes_to_str(line), headers)) except StopIteration: self._header_parser = header_parser() def __enter__(self): return self def __exit__(self, *exc_info): self.close() kombu-4.1.0/kombu/async/http/__init__.py0000644000175000017500000000120213130603207020067 0ustar omeromer00000000000000from __future__ import absolute_import, unicode_literals from kombu.async import get_event_loop from .base import Request, Headers, Response __all__ = ['Client', 'Headers', 'Response', 'Request'] def Client(hub=None, **kwargs): """Create new HTTP client.""" from .curl import CurlClient return CurlClient(hub, **kwargs) def get_client(hub=None, **kwargs): """Get or create HTTP client bound to the current event loop.""" hub = hub or get_event_loop() try: return hub._current_http_client except AttributeError: client = hub._current_http_client = Client(hub, **kwargs) return client kombu-4.1.0/kombu/async/http/curl.py0000644000175000017500000002244313130603207017307 0ustar omeromer00000000000000"""HTTP Client using pyCurl.""" from __future__ import absolute_import, unicode_literals from collections import deque from functools import partial from io import BytesIO from time import time from kombu.async.hub import READ, WRITE, get_event_loop from kombu.exceptions import HttpError from kombu.five import bytes_if_py2, items from kombu.utils.encoding import bytes_to_str from .base import BaseClient try: import pycurl # noqa except ImportError: # pragma: no cover pycurl = Curl = METH_TO_CURL = None # noqa else: from pycurl import Curl # noqa METH_TO_CURL = { # noqa 'GET': pycurl.HTTPGET, 'POST': pycurl.POST, 'PUT': pycurl.UPLOAD, 'HEAD': pycurl.NOBODY, } __all__ = ['CurlClient'] DEFAULT_USER_AGENT = bytes_if_py2('Mozilla/5.0 (compatible; pycurl)') EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH']) class CurlClient(BaseClient): """Curl HTTP Client.""" Curl = Curl def __init__(self, hub=None, max_clients=10): if pycurl is None: raise ImportError('The curl client requires the pycurl library.') hub = hub or get_event_loop() super(CurlClient, self).__init__(hub) self.max_clients = max_clients self._multi = pycurl.CurlMulti() self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) self._curls = [self.Curl() for i in range(max_clients)] self._free_list = self._curls[:] self._pending = deque() self._fds = {} 
self._socket_action = self._multi.socket_action self._timeout_check_tref = self.hub.call_repeatedly( 1.0, self._timeout_check, ) # pycurl 7.29.0 workaround dummy_curl_handle = pycurl.Curl() self._multi.add_handle(dummy_curl_handle) self._multi.remove_handle(dummy_curl_handle) def close(self): self._timeout_check_tref.cancel() for _curl in self._curls: _curl.close() self._multi.close() def add_request(self, request): self._pending.append(request) self._process_queue() self._set_timeout(0) return request def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl): if event == _pycurl.POLL_REMOVE: if fd in self._fds: self.hub.remove(fd) self._fds.pop(fd, None) else: if fd in self._fds: self.hub.remove(fd) if event == _pycurl.POLL_IN: self.hub.add_reader(fd, self.on_readable, fd) self._fds[fd] = READ elif event == _pycurl.POLL_OUT: self.hub.add_writer(fd, self.on_writable, fd) self._fds[fd] = WRITE elif event == _pycurl.POLL_INOUT: self.hub.add_reader(fd, self.on_readable, fd) self.hub.add_writer(fd, self.on_writable, fd) self._fds[fd] = READ | WRITE def _set_timeout(self, msecs): pass # TODO def _timeout_check(self, _pycurl=pycurl): while 1: try: ret, _ = self._multi.socket_all() except pycurl.error as exc: ret = exc.args[0] if ret != _pycurl.E_CALL_MULTI_PERFORM: break self._process_pending_requests() def on_readable(self, fd, _pycurl=pycurl): return self._on_event(fd, _pycurl.CSELECT_IN) def on_writable(self, fd, _pycurl=pycurl): return self._on_event(fd, _pycurl.CSELECT_OUT) def _on_event(self, fd, event, _pycurl=pycurl): while 1: try: ret, _ = self._socket_action(fd, event) except pycurl.error as exc: ret = exc.args[0] if ret != _pycurl.E_CALL_MULTI_PERFORM: break self._process_pending_requests() def _process_pending_requests(self): while 1: q, succeeded, failed = self._multi.info_read() for curl in succeeded: self._process(curl) for curl, errno, reason in failed: self._process(curl, errno, reason) if q == 0: break self._process_queue() def _process_queue(self): while 1: started = 0 while self._free_list and self._pending: started += 1 curl = self._free_list.pop() request = self._pending.popleft() headers = self.Headers() buf = BytesIO() curl.info = { 'headers': headers, 'buffer': buf, 'request': request, 'curl_start_time': time(), } self._setup_request(curl, request, buf, headers) self._multi.add_handle(curl) if not started: break def _process(self, curl, errno=None, reason=None, _pycurl=pycurl): info, curl.info = curl.info, None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info['buffer'] if errno: code = 599 error = HttpError(code, reason) error.errno = errno effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(_pycurl.HTTP_CODE) effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL) buffer.seek(0) # try: request = info['request'] request.on_ready(self.Response( request=request, code=code, headers=info['headers'], buffer=buffer, effective_url=effective_url, error=error, )) def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl): setopt = curl.setopt setopt(_pycurl.URL, bytes_to_str(request.url)) # see tornado curl client request.headers.setdefault('Expect', '') request.headers.setdefault('Pragma', '') setopt( _pycurl.HTTPHEADER, ['{0}: {1}'.format(*h) for h in items(request.headers)], ) setopt( _pycurl.HEADERFUNCTION, partial(request.on_header or self.on_header, request.headers), ) setopt( _pycurl.WRITEFUNCTION, request.on_stream or buffer.write, ) setopt( _pycurl.FOLLOWLOCATION, request.follow_redirects, ) 
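        # (Editor's note) Curl handles are recycled via _free_list, so every
        # option below is (re)set explicitly for each request rather than
        # relying on state left over from a previous transfer.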
setopt( _pycurl.USERAGENT, bytes_to_str(request.user_agent or DEFAULT_USER_AGENT), ) if request.network_interface: setopt(_pycurl.INTERFACE, request.network_interface) setopt( _pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none', ) if request.proxy_host: if not request.proxy_port: raise ValueError('Request with proxy_host but no proxy_port') setopt(_pycurl.PROXY, request.proxy_host) setopt(_pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: setopt(_pycurl.PROXYUSERPWD, '{0}:{1}'.format( request.proxy_username, request.proxy_password or '')) else: setopt(_pycurl.PROXY, '') curl.unsetopt(_pycurl.PROXYUSERPWD) setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0) setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0) if request.ca_certs is not None: setopt(_pycurl.CAINFO, request.ca_certs) setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) for meth in METH_TO_CURL.values(): setopt(meth, False) try: meth = METH_TO_CURL[request.method] except KeyError: curl.setopt(_pycurl.CUSTOMREQUEST, request.method) else: curl.unsetopt(_pycurl.CUSTOMREQUEST) setopt(meth, True) if request.method in ('POST', 'PUT'): body = request.body.encode('utf-8') if request.body else bytes() reqbuffer = BytesIO(body) setopt(_pycurl.READFUNCTION, reqbuffer.read) if request.method == 'POST': def ioctl(cmd): if cmd == _pycurl.IOCMD_RESTARTREAD: reqbuffer.seek(0) setopt(_pycurl.IOCTLFUNCTION, ioctl) setopt(_pycurl.POSTFIELDSIZE, len(body)) else: setopt(_pycurl.INFILESIZE, len(body)) elif request.method == 'GET': assert not request.body if request.auth_username is not None: auth_mode = { 'basic': _pycurl.HTTPAUTH_BASIC, 'digest': _pycurl.HTTPAUTH_DIGEST }[request.auth_mode or 'basic'] setopt(_pycurl.HTTPAUTH, auth_mode) userpwd = '{0}:{1}'.format( request.auth_username, request.auth_password or '', ) setopt(_pycurl.USERPWD, userpwd) else: curl.unsetopt(_pycurl.USERPWD) if request.client_cert is not None: setopt(_pycurl.SSLCERT, request.client_cert) if request.client_key is not None: setopt(_pycurl.SSLKEY, request.client_key) if request.on_prepare is not None: request.on_prepare(curl) kombu-4.1.0/kombu/async/hub.py0000644000175000017500000002700513134153527016151 0ustar omeromer00000000000000# -*- coding: utf-8 -*- """Event loop implementation.""" from __future__ import absolute_import, unicode_literals import errno import itertools from contextlib import contextmanager from time import sleep from types import GeneratorType as generator # noqa from kombu.five import Empty, python_2_unicode_compatible, range from kombu.log import get_logger from kombu.utils.compat import fileno from kombu.utils.eventio import ERR, READ, WRITE, poll from kombu.utils.objects import cached_property from vine import Thenable, promise from .timer import Timer __all__ = ['Hub', 'get_event_loop', 'set_event_loop'] logger = get_logger(__name__) _current_loop = None W_UNKNOWN_EVENT = """\ Received unknown event %r for fd %r, please contact support!\ """ class Stop(BaseException): """Stops the event loop.""" def _raise_stop_error(): raise Stop() @contextmanager def _dummy_context(*args, **kwargs): yield def get_event_loop(): """Get current event loop object.""" return _current_loop def set_event_loop(loop): """Set the current event loop object.""" global _current_loop _current_loop = loop return loop @python_2_unicode_compatible class Hub(object): """Event loop object. Arguments: timer (kombu.async.Timer): Specify custom timer instance. """ #: Flag set if reading from an fd will not block. 
READ = READ #: Flag set if writing to an fd will not block. WRITE = WRITE #: Flag set on error, and the fd should be read from asap. ERR = ERR #: List of callbacks to be called when the loop is exiting, #: applied with the hub instance as sole argument. on_close = None def __init__(self, timer=None): self.timer = timer if timer is not None else Timer() self.readers = {} self.writers = {} self.on_tick = set() self.on_close = set() self._ready = set() self._running = False self._loop = None # The eventloop (in celery.worker.loops) # will merge fds in this set and then instead of calling # the callback for each ready fd it will call the # :attr:`consolidate_callback` with the list of ready_fds # as an argument. This API is internal and is only # used by the multiprocessing pool to find inqueues # that are ready to write. self.consolidate = set() self.consolidate_callback = None self.propagate_errors = () self._create_poller() def reset(self): self.close() self._create_poller() def _create_poller(self): self.poller = poll() self._register_fd = self.poller.register self._unregister_fd = self.poller.unregister def _close_poller(self): if self.poller is not None: self.poller.close() self.poller = None self._register_fd = None self._unregister_fd = None def stop(self): self.call_soon(_raise_stop_error) def __repr__(self): return '<Hub@{0:#x}: R:{1} W:{2}>'.format( id(self), len(self.readers), len(self.writers), ) def fire_timers(self, min_delay=1, max_delay=10, max_timers=10, propagate=()): timer = self.timer delay = None if timer and timer._queue: for i in range(max_timers): delay, entry = next(self.scheduler) if entry is None: break try: entry() except propagate: raise except (MemoryError, AssertionError): raise except OSError as exc: if exc.errno == errno.ENOMEM: raise logger.error('Error in timer: %r', exc, exc_info=1) except Exception as exc: logger.error('Error in timer: %r', exc, exc_info=1) return min(delay or min_delay, max_delay) def _remove_from_loop(self, fd): try: self._unregister(fd) finally: self._discard(fd) def add(self, fd, callback, flags, args=(), consolidate=False): fd = fileno(fd) try: self.poller.register(fd, flags) except ValueError: self._remove_from_loop(fd) raise else: dest = self.readers if flags & READ else self.writers if consolidate: self.consolidate.add(fd) dest[fd] = None else: dest[fd] = callback, args def remove(self, fd): fd = fileno(fd) self._remove_from_loop(fd) def run_forever(self): self._running = True try: while 1: try: self.run_once() except Stop: break finally: self._running = False def run_once(self): try: next(self.loop) except StopIteration: self._loop = None def call_soon(self, callback, *args): if not isinstance(callback, Thenable): callback = promise(callback, args) self._ready.add(callback) return callback def call_later(self, delay, callback, *args): return self.timer.call_after(delay, callback, args) def call_at(self, when, callback, *args): return self.timer.call_at(when, callback, args) def call_repeatedly(self, delay, callback, *args): return self.timer.call_repeatedly(delay, callback, args) def add_reader(self, fds, callback, *args): return self.add(fds, callback, READ | ERR, args) def add_writer(self, fds, callback, *args): return self.add(fds, callback, WRITE, args) def remove_reader(self, fd): writable = fd in self.writers on_write = self.writers.get(fd) try: self._remove_from_loop(fd) finally: if writable: cb, args = on_write self.add(fd, cb, WRITE, args) def remove_writer(self, fd): readable = fd in self.readers on_read = self.readers.get(fd) try:
self._remove_from_loop(fd) finally: if readable: cb, args = on_read self.add(fd, cb, READ | ERR, args) def _unregister(self, fd): try: self.poller.unregister(fd) except (AttributeError, KeyError, OSError): pass def close(self, *args): [self._unregister(fd) for fd in self.readers] self.readers.clear() [self._unregister(fd) for fd in self.writers] self.writers.clear() self.consolidate.clear() self._close_poller() for callback in self.on_close: callback(self) def _discard(self, fd): fd = fileno(fd) self.readers.pop(fd, None) self.writers.pop(fd, None) self.consolidate.discard(fd) def on_callback_error(self, callback, exc): logger.error( 'Callback %r raised exception: %r', callback, exc, exc_info=1, ) def create_loop(self, generator=generator, sleep=sleep, min=min, next=next, Empty=Empty, StopIteration=StopIteration, KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR): readers, writers = self.readers, self.writers poll = self.poller.poll fire_timers = self.fire_timers hub_remove = self.remove scheduled = self.timer._queue consolidate = self.consolidate consolidate_callback = self.consolidate_callback on_tick = self.on_tick propagate = self.propagate_errors todo = self._ready while 1: for tick_callback in on_tick: tick_callback() # To avoid infinite loop where one of the callables adds items # to self._ready (via call_soon or otherwise), we take pop only # N items from the ready set. # N represents the current number of items on the set. # That way if a todo adds another one to the ready set, # we will break early and allow execution of readers and writers. current_todos = len(todo) for _ in itertools.repeat(None, current_todos): if not todo: break item = todo.pop() if item: item() poll_timeout = fire_timers(propagate=propagate) if scheduled else 1 # print('[[[HUB]]]: %s' % (self.repr_active(),)) if readers or writers: to_consolidate = [] try: events = poll(poll_timeout) # print('[EVENTS]: %s' % (self.repr_events(events),)) except ValueError: # Issue 882 raise StopIteration() for fd, event in events or (): general_error = False if fd in consolidate and \ writers.get(fd) is None: to_consolidate.append(fd) continue cb = cbargs = None if event & READ: try: cb, cbargs = readers[fd] except KeyError: self.remove_reader(fd) continue elif event & WRITE: try: cb, cbargs = writers[fd] except KeyError: self.remove_writer(fd) continue elif event & ERR: general_error = True else: logger.info(W_UNKNOWN_EVENT, event, fd) general_error = True if general_error: try: cb, cbargs = (readers.get(fd) or writers.get(fd)) except TypeError: pass if cb is None: self.remove(fd) continue if isinstance(cb, generator): try: next(cb) except OSError as exc: if exc.errno != errno.EBADF: raise hub_remove(fd) except StopIteration: pass except Exception: hub_remove(fd) raise else: try: cb(*cbargs) except Empty: pass if to_consolidate: consolidate_callback(to_consolidate) else: # no sockets yet, startup is probably not done. 
sleep(min(poll_timeout, 0.1)) yield def repr_active(self): from .debug import repr_active return repr_active(self) def repr_events(self, events): from .debug import repr_events return repr_events(self, events or []) @cached_property def scheduler(self): return iter(self.timer) @property def loop(self): if self._loop is None: self._loop = self.create_loop() return self._loop kombu-4.1.0/kombu/pidbox.py0000644000175000017500000003277513130603207015544 0ustar omeromer00000000000000"""Generic process mailbox.""" from __future__ import absolute_import, unicode_literals import socket import warnings from collections import defaultdict, deque from contextlib import contextmanager from copy import copy from itertools import count from threading import local from time import time from . import Exchange, Queue, Consumer, Producer from .clocks import LamportClock from .common import maybe_declare, oid_from from .exceptions import InconsistencyError from .five import range from .log import get_logger from .utils.functional import maybe_evaluate, reprcall from .utils.objects import cached_property from .utils.uuid import uuid W_PIDBOX_IN_USE = """\ A node named {node.hostname} is already using this process mailbox! Maybe you forgot to shutdown the other node or did not do so properly? Or if you meant to start multiple nodes on the same host please make sure you give each node a unique node name! """ __all__ = ['Node', 'Mailbox'] logger = get_logger(__name__) debug, error = logger.debug, logger.error class Node(object): """Mailbox node.""" #: hostname of the node. hostname = None #: the :class:`Mailbox` this is a node for. mailbox = None #: map of method name/handlers. handlers = None #: current context (passed on to handlers) state = None #: current channel. channel = None def __init__(self, hostname, state=None, channel=None, handlers=None, mailbox=None): self.channel = channel self.mailbox = mailbox self.hostname = hostname self.state = state self.adjust_clock = self.mailbox.clock.adjust if handlers is None: handlers = {} self.handlers = handlers def Consumer(self, channel=None, no_ack=True, accept=None, **options): queue = self.mailbox.get_queue(self.hostname) def verify_exclusive(name, messages, consumers): if consumers: warnings.warn(W_PIDBOX_IN_USE.format(node=self)) queue.on_declared = verify_exclusive return Consumer( channel or self.channel, [queue], no_ack=no_ack, accept=self.mailbox.accept if accept is None else accept, **options ) def handler(self, fun): self.handlers[fun.__name__] = fun return fun def on_decode_error(self, message, exc): error('Cannot decode message: %r', exc, exc_info=1) def listen(self, channel=None, callback=None): consumer = self.Consumer(channel=channel, callbacks=[callback or self.handle_message], on_decode_error=self.on_decode_error) consumer.consume() return consumer def dispatch(self, method, arguments=None, reply_to=None, ticket=None, **kwargs): arguments = arguments or {} debug('pidbox received method %s [reply_to:%s ticket:%s]', reprcall(method, (), kwargs=arguments), reply_to, ticket) handle = reply_to and self.handle_call or self.handle_cast try: reply = handle(method, arguments) except SystemExit: raise except Exception as exc: error('pidbox command error: %r', exc, exc_info=1) reply = {'error': repr(exc)} if reply_to: self.reply({self.hostname: reply}, exchange=reply_to['exchange'], routing_key=reply_to['routing_key'], ticket=ticket) return reply def handle(self, method, arguments={}): return self.handlers[method](self.state, **arguments) def 
handle_call(self, method, arguments): return self.handle(method, arguments) def handle_cast(self, method, arguments): return self.handle(method, arguments) def handle_message(self, body, message=None): destination = body.get('destination') if message: self.adjust_clock(message.headers.get('clock') or 0) if not destination or self.hostname in destination: return self.dispatch(**body) dispatch_from_message = handle_message def reply(self, data, exchange, routing_key, ticket, **kwargs): self.mailbox._publish_reply(data, exchange, routing_key, ticket, channel=self.channel, serializer=self.mailbox.serializer) class Mailbox(object): """Process Mailbox.""" node_cls = Node exchange_fmt = '%s.pidbox' reply_exchange_fmt = 'reply.%s.pidbox' #: Name of application. namespace = None #: Connection (if bound). connection = None #: Exchange type (usually direct, or fanout for broadcast). type = 'direct' #: mailbox exchange (init by constructor). exchange = None #: exchange to send replies to. reply_exchange = None #: Only accepts json messages by default. accept = ['json'] #: Message serializer serializer = None def __init__(self, namespace, type='direct', connection=None, clock=None, accept=None, serializer=None, producer_pool=None, queue_ttl=None, queue_expires=None, reply_queue_ttl=None, reply_queue_expires=10.0): self.namespace = namespace self.connection = connection self.type = type self.clock = LamportClock() if clock is None else clock self.exchange = self._get_exchange(self.namespace, self.type) self.reply_exchange = self._get_reply_exchange(self.namespace) self._tls = local() self.unclaimed = defaultdict(deque) self.accept = self.accept if accept is None else accept self.serializer = self.serializer if serializer is None else serializer self.queue_ttl = queue_ttl self.queue_expires = queue_expires self.reply_queue_ttl = reply_queue_ttl self.reply_queue_expires = reply_queue_expires self._producer_pool = producer_pool def __call__(self, connection): bound = copy(self) bound.connection = connection return bound def Node(self, hostname=None, state=None, channel=None, handlers=None): hostname = hostname or socket.gethostname() return self.node_cls(hostname, state, channel, handlers, mailbox=self) def call(self, destination, command, kwargs={}, timeout=None, callback=None, channel=None): return self._broadcast(command, kwargs, destination, reply=True, timeout=timeout, callback=callback, channel=channel) def cast(self, destination, command, kwargs={}): return self._broadcast(command, kwargs, destination, reply=False) def abcast(self, command, kwargs={}): return self._broadcast(command, kwargs, reply=False) def multi_call(self, command, kwargs={}, timeout=1, limit=None, callback=None, channel=None): return self._broadcast(command, kwargs, reply=True, timeout=timeout, limit=limit, callback=callback, channel=channel) def get_reply_queue(self): oid = self.oid return Queue( '%s.%s' % (oid, self.reply_exchange.name), exchange=self.reply_exchange, routing_key=oid, durable=False, auto_delete=True, expires=self.reply_queue_expires, message_ttl=self.reply_queue_ttl, ) @cached_property def reply_queue(self): return self.get_reply_queue() def get_queue(self, hostname): return Queue( '%s.%s.pidbox' % (hostname, self.namespace), exchange=self.exchange, durable=False, auto_delete=True, expires=self.queue_expires, message_ttl=self.queue_ttl, ) @contextmanager def producer_or_acquire(self, producer=None, channel=None): if producer: yield producer elif self.producer_pool: with self.producer_pool.acquire() as producer: 
yield producer else: yield Producer(channel, auto_declare=False) def _publish_reply(self, reply, exchange, routing_key, ticket, channel=None, producer=None, **opts): chan = channel or self.connection.default_channel exchange = Exchange(exchange, exchange_type='direct', delivery_mode='transient', durable=False) with self.producer_or_acquire(producer, chan) as producer: try: producer.publish( reply, exchange=exchange, routing_key=routing_key, declare=[exchange], headers={ 'ticket': ticket, 'clock': self.clock.forward(), }, **opts ) except InconsistencyError: # queue probably deleted and no one is expecting a reply. pass def _publish(self, type, arguments, destination=None, reply_ticket=None, channel=None, timeout=None, serializer=None, producer=None): message = {'method': type, 'arguments': arguments, 'destination': destination} chan = channel or self.connection.default_channel exchange = self.exchange if reply_ticket: maybe_declare(self.reply_queue(channel)) message.update(ticket=reply_ticket, reply_to={'exchange': self.reply_exchange.name, 'routing_key': self.oid}) serializer = serializer or self.serializer with self.producer_or_acquire(producer, chan) as producer: producer.publish( message, exchange=exchange.name, declare=[exchange], headers={'clock': self.clock.forward(), 'expires': time() + timeout if timeout else 0}, serializer=serializer, ) def _broadcast(self, command, arguments=None, destination=None, reply=False, timeout=1, limit=None, callback=None, channel=None, serializer=None): if destination is not None and \ not isinstance(destination, (list, tuple)): raise ValueError( 'destination must be a list/tuple not {0}'.format( type(destination))) arguments = arguments or {} reply_ticket = reply and uuid() or None chan = channel or self.connection.default_channel # Set reply limit to number of destinations (if specified) if limit is None and destination: limit = destination and len(destination) or None serializer = serializer or self.serializer self._publish(command, arguments, destination=destination, reply_ticket=reply_ticket, channel=chan, timeout=timeout, serializer=serializer) if reply_ticket: return self._collect(reply_ticket, limit=limit, timeout=timeout, callback=callback, channel=chan) def _collect(self, ticket, limit=None, timeout=1, callback=None, channel=None, accept=None): if accept is None: accept = self.accept chan = channel or self.connection.default_channel queue = self.reply_queue consumer = Consumer(chan, [queue], accept=accept, no_ack=True) responses = [] unclaimed = self.unclaimed adjust_clock = self.clock.adjust try: return unclaimed.pop(ticket) except KeyError: pass def on_message(body, message): # ticket header added in kombu 2.5 header = message.headers.get adjust_clock(header('clock') or 0) expires = header('expires') if expires and time() > expires: return this_id = header('ticket', ticket) if this_id == ticket: if callback: callback(body) responses.append(body) else: unclaimed[this_id].append(body) consumer.register_callback(on_message) try: with consumer: for i in limit and range(limit) or count(): try: self.connection.drain_events(timeout=timeout) except socket.timeout: break return responses finally: chan.after_reply_message_received(queue.name) def _get_exchange(self, namespace, type): return Exchange(self.exchange_fmt % namespace, type=type, durable=False, delivery_mode='transient') def _get_reply_exchange(self, namespace): return Exchange(self.reply_exchange_fmt % namespace, type='direct', durable=False, delivery_mode='transient') @cached_property
def oid(self): try: return self._tls.OID except AttributeError: oid = self._tls.OID = oid_from(self) return oid @cached_property def producer_pool(self): return maybe_evaluate(self._producer_pool) kombu-4.1.0/kombu/abstract.py0000644000175000017500000000667313130603207016060 0ustar omeromer00000000000000"""Object utilities.""" from __future__ import absolute_import, unicode_literals from copy import copy from .connection import maybe_channel from .exceptions import NotBoundError from .five import python_2_unicode_compatible from .utils.functional import ChannelPromise __all__ = ['Object', 'MaybeChannelBound'] def unpickle_dict(cls, kwargs): return cls(**kwargs) def _any(v): return v class Object(object): """Common base class. Supports automatic kwargs->attributes handling, and cloning. """ attrs = () def __init__(self, *args, **kwargs): for name, type_ in self.attrs: value = kwargs.get(name) if value is not None: setattr(self, name, (type_ or _any)(value)) else: try: getattr(self, name) except AttributeError: setattr(self, name, None) def as_dict(self, recurse=False): def f(obj, type): if recurse and isinstance(obj, Object): return obj.as_dict(recurse=True) return type(obj) if type and obj is not None else obj return { attr: f(getattr(self, attr), type) for attr, type in self.attrs } def __reduce__(self): return unpickle_dict, (self.__class__, self.as_dict()) def __copy__(self): return self.__class__(**self.as_dict()) @python_2_unicode_compatible class MaybeChannelBound(Object): """Mixin for classes that can be bound to an AMQP channel.""" _channel = None _is_bound = False #: Defines whether maybe_declare can skip declaring this entity twice. can_cache_declaration = False def __call__(self, channel): """`self(channel) -> self.bind(channel)`.""" return self.bind(channel) def bind(self, channel): """Create copy of the instance that is bound to a channel.""" return copy(self).maybe_bind(channel) def maybe_bind(self, channel): """Bind instance to channel if not already bound.""" if not self.is_bound and channel: self._channel = maybe_channel(channel) self.when_bound() self._is_bound = True return self def revive(self, channel): """Revive channel after the connection has been re-established. Used by :meth:`~kombu.Connection.ensure`. 
""" if self.is_bound: self._channel = channel self.when_bound() def when_bound(self): """Callback called when the class is bound.""" pass def __repr__(self): return self._repr_entity(type(self).__name__) def _repr_entity(self, item=''): item = item or type(self).__name__ if self.is_bound: return '<{0} bound to chan:{1}>'.format( item or type(self).__name__, self.channel.channel_id) return ''.format(item) @property def is_bound(self): """Flag set if the channel is bound.""" return self._is_bound and self._channel is not None @property def channel(self): """Current channel if the object is bound.""" channel = self._channel if channel is None: raise NotBoundError( "Can't call method on {0} not bound to a channel".format( type(self).__name__)) if isinstance(channel, ChannelPromise): channel = self._channel = channel() return channel kombu-4.1.0/kombu/pools.py0000644000175000017500000000777013130603207015410 0ustar omeromer00000000000000"""Public resource pools.""" from __future__ import absolute_import, unicode_literals import os from itertools import chain from .connection import Resource from .five import range, values from .messaging import Producer from .utils.collections import EqualityDict from .utils.compat import register_after_fork from .utils.functional import lazy __all__ = ['ProducerPool', 'PoolGroup', 'register_group', 'connections', 'producers', 'get_limit', 'set_limit', 'reset'] _limit = [10] _groups = [] use_global_limit = object() disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION') def _after_fork_cleanup_group(group): group.clear() class ProducerPool(Resource): """Pool of :class:`kombu.Producer` instances.""" Producer = Producer close_after_fork = True def __init__(self, connections, *args, **kwargs): self.connections = connections self.Producer = kwargs.pop('Producer', None) or self.Producer super(ProducerPool, self).__init__(*args, **kwargs) def _acquire_connection(self): return self.connections.acquire(block=True) def create_producer(self): conn = self._acquire_connection() try: return self.Producer(conn) except BaseException: conn.release() raise def new(self): return lazy(self.create_producer) def setup(self): if self.limit: for _ in range(self.limit): self._resource.put_nowait(self.new()) def close_resource(self, resource): pass def prepare(self, p): if callable(p): p = p() if p._channel is None: conn = self._acquire_connection() try: p.revive(conn) except BaseException: conn.release() raise return p def release(self, resource): if resource.__connection__: resource.__connection__.release() resource.channel = None super(ProducerPool, self).release(resource) class PoolGroup(EqualityDict): """Collection of resource pools.""" def __init__(self, limit=None, close_after_fork=True): self.limit = limit self.close_after_fork = close_after_fork if self.close_after_fork and register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_group) def create(self, resource, limit): raise NotImplementedError('PoolGroups must define ``create``') def __missing__(self, resource): limit = self.limit if limit is use_global_limit: limit = get_limit() k = self[resource] = self.create(resource, limit) return k def register_group(group): """Register group (can be used as decorator).""" _groups.append(group) return group class Connections(PoolGroup): """Collection of connection pools.""" def create(self, connection, limit): return connection.Pool(limit=limit) connections = register_group(Connections(limit=use_global_limit)) # noqa: E305 class 
Producers(PoolGroup): """Collection of producer pools.""" def create(self, connection, limit): return ProducerPool(connections[connection], limit=limit) producers = register_group(Producers(limit=use_global_limit)) # noqa: E305 def _all_pools(): return chain(*[(values(g) if g else iter([])) for g in _groups]) def get_limit(): """Get current connection pool limit.""" return _limit[0] def set_limit(limit, force=False, reset_after=False, ignore_errors=False): """Set new connection pool limit.""" limit = limit or 0 glimit = _limit[0] or 0 if limit != glimit: _limit[0] = limit for pool in _all_pools(): pool.resize(limit) return limit def reset(*args, **kwargs): """Reset all pools by closing open resources.""" for pool in _all_pools(): try: pool.force_close_all() except Exception: pass for group in _groups: group.clear() kombu-4.1.0/kombu/resource.py0000644000175000017500000001617213130603207016077 0ustar omeromer00000000000000"""Generic resource pool implementation.""" from __future__ import absolute_import, unicode_literals import os from collections import deque from . import exceptions from .five import Empty, LifoQueue as _LifoQueue from .utils.compat import register_after_fork from .utils.functional import lazy def _after_fork_cleanup_resource(resource): try: resource.force_close_all() except Exception: pass class LifoQueue(_LifoQueue): """Last in first out version of Queue.""" def _init(self, maxsize): self.queue = deque() class Resource(object): """Pool of resources.""" LimitExceeded = exceptions.LimitExceeded close_after_fork = False def __init__(self, limit=None, preload=None, close_after_fork=None): self._limit = limit self.preload = preload or 0 self._closed = False self.close_after_fork = ( close_after_fork if close_after_fork is not None else self.close_after_fork ) self._resource = LifoQueue() self._dirty = set() if self.close_after_fork and register_after_fork is not None: register_after_fork(self, _after_fork_cleanup_resource) self.setup() def setup(self): raise NotImplementedError('subclass responsibility') def _add_when_empty(self): if self.limit and len(self._dirty) >= self.limit: raise self.LimitExceeded(self.limit) # All taken, put new on the queue and # try get again, this way the first in line # will get the resource. self._resource.put_nowait(self.new()) def acquire(self, block=False, timeout=None): """Acquire resource. Arguments: block (bool): If the limit is exceeded, then block until there is an available item. timeout (float): Timeout to wait if ``block`` is true. Default is :const:`None` (forever). Raises: LimitExceeded: if block is false and the limit has been exceeded. """ if self._closed: raise RuntimeError('Acquire on closed pool') if self.limit: while 1: try: R = self._resource.get(block=block, timeout=timeout) except Empty: self._add_when_empty() else: try: R = self.prepare(R) except BaseException: if isinstance(R, lazy): # not evaluated yet, just put it back self._resource.put_nowait(R) else: # evaluated, so must try to release/close first. self.release(R) raise self._dirty.add(R) break else: R = self.prepare(self.new()) def release(): """Release resource so it can be used by another thread. Warnings: The caller is responsible for discarding the object, and to never use the resource again. A new resource must be acquired if so needed.
""" self.release(R) R.release = release return R def prepare(self, resource): return resource def close_resource(self, resource): resource.close() def release_resource(self, resource): pass def replace(self, resource): """Replace existing resource with a new instance. This can be used in case of defective resources. """ if self.limit: self._dirty.discard(resource) self.close_resource(resource) def release(self, resource): if self.limit: self._dirty.discard(resource) self._resource.put_nowait(resource) self.release_resource(resource) else: self.close_resource(resource) def collect_resource(self, resource): pass def force_close_all(self): """Close and remove all resources in the pool (also those in use). Used to close resources from parent processes after fork (e.g. sockets/connections). """ if self._closed: return self._closed = True dirty = self._dirty resource = self._resource while 1: # - acquired try: dres = dirty.pop() except KeyError: break try: self.collect_resource(dres) except AttributeError: # Issue #78 pass while 1: # - available # deque supports '.clear', but lists do not, so for that # reason we use pop here, so that the underlying object can # be any object supporting '.pop' and '.append'. try: res = resource.queue.pop() except IndexError: break try: self.collect_resource(res) except AttributeError: pass # Issue #78 def resize(self, limit, force=False, ignore_errors=False, reset=False): prev_limit = self._limit if (self._dirty and limit < self._limit) and not ignore_errors: if not force: raise RuntimeError( "Can't shrink pool when in use: was={0} now={1}".format( limit, self._limit)) reset = True self._limit = limit if reset: try: self.force_close_all() except Exception: pass self.setup() if limit < prev_limit: self._shrink_down() def _shrink_down(self): resource = self._resource # Items to the left are last recently used, so we remove those first. with resource.mutex: while len(resource.queue) > self.limit: self.collect_resource(resource.queue.popleft()) @property def limit(self): return self._limit @limit.setter def limit(self, limit): self.resize(limit) if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover _orig_acquire = acquire _orig_release = release _next_resource_id = 0 def acquire(self, *args, **kwargs): # noqa import traceback id = self._next_resource_id = self._next_resource_id + 1 print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) r = self._orig_acquire(*args, **kwargs) r._resource_id = id print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) if not hasattr(r, 'acquired_by'): r.acquired_by = [] r.acquired_by.append(traceback.format_stack()) return r def release(self, resource): # noqa id = resource._resource_id print('+{0} RELEASE {1}'.format(id, self.__class__.__name__)) r = self._orig_release(resource) print('-{0} RELEASE {1}'.format(id, self.__class__.__name__)) self._next_resource_id -= 1 return r kombu-4.1.0/kombu/simple.py0000644000175000017500000001152113130603207015532 0ustar omeromer00000000000000"""Simple messaging interface.""" from __future__ import absolute_import, unicode_literals import socket from collections import deque from . import entity from . 
import messaging from .connection import maybe_channel from .five import Empty, monotonic __all__ = ['SimpleQueue', 'SimpleBuffer'] class SimpleBase(object): Empty = Empty _consuming = False def __enter__(self): return self def __exit__(self, *exc_info): self.close() def __init__(self, channel, producer, consumer, no_ack=False): self.channel = maybe_channel(channel) self.producer = producer self.consumer = consumer self.no_ack = no_ack self.queue = self.consumer.queues[0] self.buffer = deque() self.consumer.register_callback(self._receive) def get(self, block=True, timeout=None): if not block: return self.get_nowait() self._consume() time_start = monotonic() remaining = timeout while True: if self.buffer: return self.buffer.popleft() if remaining is not None and remaining <= 0.0: raise self.Empty() try: # The `drain_events` method will # block on the socket connection to RabbitMQ. If any # application-level messages are received, it will put them # into `self.buffer`. # * The method will block for UP TO `timeout` seconds. # * The method may raise a socket.timeout exception; or... # * The method may return without having put anything on # `self.buffer`. This is because internal heartbeat # messages are sent over the same socket; also POSIX makes # no guarantees against socket calls returning early. self.channel.connection.client.drain_events(timeout=remaining) except socket.timeout: raise self.Empty() if remaining is not None: elapsed = monotonic() - time_start remaining = timeout - elapsed def get_nowait(self): m = self.queue.get(no_ack=self.no_ack) if not m: raise self.Empty() return m def put(self, message, serializer=None, headers=None, compression=None, routing_key=None, **kwargs): self.producer.publish(message, serializer=serializer, routing_key=routing_key, headers=headers, compression=compression, **kwargs) def clear(self): return self.consumer.purge() def qsize(self): _, size, _ = self.queue.queue_declare(passive=True) return size def close(self): self.consumer.cancel() def _receive(self, message_data, message): self.buffer.append(message) def _consume(self): if not self._consuming: self.consumer.consume(no_ack=self.no_ack) self._consuming = True def __len__(self): """`len(self) -> self.qsize()`.""" return self.qsize() def __bool__(self): return True __nonzero__ = __bool__ class SimpleQueue(SimpleBase): """Simple API for persistent queues.""" no_ack = False queue_opts = {} exchange_opts = {'type': 'direct'} def __init__(self, channel, name, no_ack=None, queue_opts=None, exchange_opts=None, serializer=None, compression=None, **kwargs): queue = name queue_opts = dict(self.queue_opts, **queue_opts or {}) exchange_opts = dict(self.exchange_opts, **exchange_opts or {}) if no_ack is None: no_ack = self.no_ack if not isinstance(queue, entity.Queue): exchange = entity.Exchange(name, **exchange_opts) queue = entity.Queue(name, exchange, name, **queue_opts) routing_key = name else: name = queue.name exchange = queue.exchange routing_key = queue.routing_key consumer = messaging.Consumer(channel, queue) producer = messaging.Producer(channel, exchange, serializer=serializer, routing_key=routing_key, compression=compression) super(SimpleQueue, self).__init__(channel, producer, consumer, no_ack, **kwargs) class SimpleBuffer(SimpleQueue): """Simple API for ephemeral queues.""" no_ack = True queue_opts = dict(durable=False, auto_delete=True) exchange_opts = dict(durable=False, delivery_mode='transient', auto_delete=True) 
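For quick orientation, here is a minimal usage sketch of the ``SimpleQueue`` API defined above. It is a sketch only: the broker URL, queue name and payload are illustrative assumptions, not part of the library.

    from kombu import Connection

    # Connect to an assumed local broker and open a simple queue.
    with Connection('amqp://guest:guest@localhost//') as conn:
        queue = conn.SimpleQueue('simple-demo')     # declares queue + direct exchange
        queue.put({'hello': 'world'})               # serialized to json by default
        message = queue.get(block=True, timeout=1)  # drains events for up to 1 second
        print(message.payload)                      # -> {'hello': 'world'}
        message.ack()                               # SimpleQueue defaults to no_ack=False
        queue.close()

``SimpleBuffer`` is used the same way (via ``Connection.SimpleBuffer``), but acks automatically and uses a transient, auto-deleting queue.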
kombu-4.1.0/kombu/entity.py0000644000175000017500000007757313130603207015600 0ustar omeromer00000000000000"""Exchange and Queue declarations.""" from __future__ import absolute_import, unicode_literals import numbers from .abstract import MaybeChannelBound, Object from .exceptions import ContentDisallowed from .five import python_2_unicode_compatible, string_t from .serialization import prepare_accept_content TRANSIENT_DELIVERY_MODE = 1 PERSISTENT_DELIVERY_MODE = 2 DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE, 'persistent': PERSISTENT_DELIVERY_MODE} __all__ = ['Exchange', 'Queue', 'binding', 'maybe_delivery_mode'] INTERNAL_EXCHANGE_PREFIX = ('amq.',) def _reprstr(s): s = repr(s) if isinstance(s, string_t) and s.startswith("u'"): return s[2:-1] return s[1:-1] def pretty_bindings(bindings): return '[{0}]'.format(', '.join(map(str, bindings))) def maybe_delivery_mode( v, modes=DELIVERY_MODES, default=PERSISTENT_DELIVERY_MODE): """Get delivery mode by name (or none if undefined).""" if v: return v if isinstance(v, numbers.Integral) else modes[v] return default @python_2_unicode_compatible class Exchange(MaybeChannelBound): """An Exchange declaration. Arguments: name (str): See :attr:`name`. type (str): See :attr:`type`. channel (kombu.Connection, ChannelT): See :attr:`channel`. durable (bool): See :attr:`durable`. auto_delete (bool): See :attr:`auto_delete`. delivery_mode (enum): See :attr:`delivery_mode`. arguments (Dict): See :attr:`arguments`. no_declare (bool): See :attr:`no_declare` Attributes: name (str): Name of the exchange. Default is no name (the default exchange). type (str): *This description of AMQP exchange types was shamelessly stolen from the blog post `AMQP in 10 minutes: Part 4`_ by Rajith Attapattu. Reading this article is recommended if you're new to amqp.* "AMQP defines four default exchange types (routing algorithms) that covers most of the common messaging use cases. An AMQP broker can also define additional exchange types, so see your broker manual for more information about available exchange types. * `direct` (*default*) Direct match between the routing key in the message, and the routing criteria used when a queue is bound to this exchange. * `topic` Wildcard match between the routing key and the routing pattern specified in the exchange/queue binding. The routing key is treated as zero or more words delimited by `"."` and supports special wildcard characters. `"*"` matches a single word and `"#"` matches zero or more words. * `fanout` Queues are bound to this exchange with no arguments. Hence any message sent to this exchange will be forwarded to all queues bound to this exchange. * `headers` Queues are bound to this exchange with a table of arguments containing headers and values (optional). A special argument named "x-match" determines the matching algorithm, where `"all"` implies an `AND` (all pairs must match) and `"any"` implies `OR` (at least one pair must match). :attr:`arguments` is used to specify the arguments. .. _`AMQP in 10 minutes: Part 4`: https://bit.ly/2rcICv5 channel (ChannelT): The channel the exchange is bound to (if bound). durable (bool): Durable exchanges remain active when a server restarts. Non-durable exchanges (transient exchanges) are purged when a server restarts. Default is :const:`True`. auto_delete (bool): If set, the exchange is deleted when all queues have finished using it. Default is :const:`False`. delivery_mode (enum): The default delivery mode used for messages. The value is an integer, or alias string. 
* 1 or `"transient"` The message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent" (*default*) The message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). arguments (Dict): Additional arguments to specify when the exchange is declared. no_declare (bool): Never declare this exchange (:meth:`declare` does nothing). """ TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE name = '' type = 'direct' durable = True auto_delete = False passive = False delivery_mode = None no_declare = False attrs = ( ('name', None), ('type', None), ('arguments', None), ('durable', bool), ('passive', bool), ('auto_delete', bool), ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m), ('no_declare', bool), ) def __init__(self, name='', type='', channel=None, **kwargs): super(Exchange, self).__init__(**kwargs) self.name = name or self.name self.type = type or self.type self.maybe_bind(channel) def __hash__(self): return hash('E|%s' % (self.name,)) def _can_declare(self): return not self.no_declare and ( self.name and not self.name.startswith( INTERNAL_EXCHANGE_PREFIX)) def declare(self, nowait=False, passive=None, channel=None): """Declare the exchange. Creates the exchange on the broker, unless passive is set in which case it will only assert that the exchange exists. Arguments: nowait (bool): If set the server will not respond, and a response will not be waited for. Default is :const:`False`. """ if self._can_declare(): passive = self.passive if passive is None else passive return (channel or self.channel).exchange_declare( exchange=self.name, type=self.type, durable=self.durable, auto_delete=self.auto_delete, arguments=self.arguments, nowait=nowait, passive=passive, ) def bind_to(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None, **kwargs): """Bind the exchange to another exchange. Arguments: nowait (bool): If set the server will not respond, and the call will not block waiting for a response. Default is :const:`False`. """ if isinstance(exchange, Exchange): exchange = exchange.name return (channel or self.channel).exchange_bind( destination=self.name, source=exchange, routing_key=routing_key, nowait=nowait, arguments=arguments, ) def unbind_from(self, source='', routing_key='', nowait=False, arguments=None, channel=None): """Delete previously created exchange binding from the server.""" if isinstance(source, Exchange): source = source.name return (channel or self.channel).exchange_unbind( destination=self.name, source=source, routing_key=routing_key, nowait=nowait, arguments=arguments, ) def Message(self, body, delivery_mode=None, properties=None, **kwargs): """Create message instance to be sent with :meth:`publish`. Arguments: body (Any): Message body. delivery_mode (enum): Set custom delivery mode. Defaults to :attr:`delivery_mode`. priority (int): Message priority, 0 to broker configured max priority, where higher is better. content_type (str): The message's content_type. If content_type is set, no serialization occurs as it is assumed this is either a binary object, or you've done your own serialization. Leave blank if using built-in serialization as our library properly sets content_type. content_encoding (str): The character set in which this object is encoded. Use "binary" if sending in raw binary objects. 
Leave blank if using built-in serialization as our library properly sets content_encoding. properties (Dict): Message properties. headers (Dict): Message headers. """ # XXX This method is unused by kombu itself AFAICT [ask]. properties = {} if properties is None else properties properties['delivery_mode'] = maybe_delivery_mode(self.delivery_mode) return self.channel.prepare_message( body, properties=properties, **kwargs) def publish(self, message, routing_key=None, mandatory=False, immediate=False, exchange=None): """Publish message. Arguments: message (Union[kombu.Message, str, bytes]): Message to publish. routing_key (str): Message routing key. mandatory (bool): Currently not supported. immediate (bool): Currently not supported. """ if isinstance(message, string_t): message = self.Message(message) exchange = exchange or self.name return self.channel.basic_publish( message, exchange=exchange, routing_key=routing_key, mandatory=mandatory, immediate=immediate, ) def delete(self, if_unused=False, nowait=False): """Delete the exchange declaration on server. Arguments: if_unused (bool): Delete only if the exchange has no bindings. Default is :const:`False`. nowait (bool): If set the server will not respond, and a response will not be waited for. Default is :const:`False`. """ return self.channel.exchange_delete(exchange=self.name, if_unused=if_unused, nowait=nowait) def binding(self, routing_key='', arguments=None, unbind_arguments=None): return binding(self, routing_key, arguments, unbind_arguments) def __eq__(self, other): if isinstance(other, Exchange): return (self.name == other.name and self.type == other.type and self.arguments == other.arguments and self.durable == other.durable and self.auto_delete == other.auto_delete and self.delivery_mode == other.delivery_mode) return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return self._repr_entity(self) def __str__(self): return 'Exchange {0}({1})'.format( _reprstr(self.name) or repr(''), self.type, ) @property def can_cache_declaration(self): return not self.auto_delete @python_2_unicode_compatible class binding(Object): """Represents a queue or exchange binding. Arguments: exchange (Exchange): Exchange to bind to. routing_key (str): Routing key used as binding key. arguments (Dict): Arguments for bind operation. unbind_arguments (Dict): Arguments for unbind operation. 
""" attrs = ( ('exchange', None), ('routing_key', None), ('arguments', None), ('unbind_arguments', None) ) def __init__(self, exchange=None, routing_key='', arguments=None, unbind_arguments=None): self.exchange = exchange self.routing_key = routing_key self.arguments = arguments self.unbind_arguments = unbind_arguments def declare(self, channel, nowait=False): """Declare destination exchange.""" if self.exchange and self.exchange.name: self.exchange.declare(channel=channel, nowait=nowait) def bind(self, entity, nowait=False, channel=None): """Bind entity to this binding.""" entity.bind_to(exchange=self.exchange, routing_key=self.routing_key, arguments=self.arguments, nowait=nowait, channel=channel) def unbind(self, entity, nowait=False, channel=None): """Unbind entity from this binding.""" entity.unbind_from(self.exchange, routing_key=self.routing_key, arguments=self.unbind_arguments, nowait=nowait, channel=channel) def __repr__(self): return ''.format(self) def __str__(self): return '{0}->{1}'.format( _reprstr(self.exchange.name), _reprstr(self.routing_key), ) @python_2_unicode_compatible class Queue(MaybeChannelBound): """A Queue declaration. Arguments: name (str): See :attr:`name`. exchange (Exchange, str): See :attr:`exchange`. routing_key (str): See :attr:`routing_key`. channel (kombu.Connection, ChannelT): See :attr:`channel`. durable (bool): See :attr:`durable`. exclusive (bool): See :attr:`exclusive`. auto_delete (bool): See :attr:`auto_delete`. queue_arguments (Dict): See :attr:`queue_arguments`. binding_arguments (Dict): See :attr:`binding_arguments`. consumer_arguments (Dict): See :attr:`consumer_arguments`. no_declare (bool): See :attr:`no_declare`. on_declared (Callable): See :attr:`on_declared`. expires (float): See :attr:`expires`. message_ttl (float): See :attr:`message_ttl`. max_length (int): See :attr:`max_length`. max_length_bytes (int): See :attr:`max_length_bytes`. max_priority (int): See :attr:`max_priority`. Attributes: name (str): Name of the queue. Default is no name (default queue destination). exchange (Exchange): The :class:`Exchange` the queue binds to. routing_key (str): The routing key (if any), also called *binding key*. The interpretation of the routing key depends on the :attr:`Exchange.type`. * direct exchange Matches if the routing key property of the message and the :attr:`routing_key` attribute are identical. * fanout exchange Always matches, even if the binding does not have a key. * topic exchange Matches the routing key property of the message by a primitive pattern matching scheme. The message routing key then consists of words separated by dots (`"."`, like domain names), and two special characters are available; star (`"*"`) and hash (`"#"`). The star matches any word, and the hash matches zero or more words. For example `"*.stock.#"` matches the routing keys `"usd.stock"` and `"eur.stock.db"` but not `"stock.nasdaq"`. channel (ChannelT): The channel the Queue is bound to (if bound). durable (bool): Durable queues remain active when a server restarts. Non-durable queues (transient queues) are purged if/when a server restarts. Note that durable queues do not necessarily hold persistent messages, although it does not make sense to send persistent messages to a transient queue. Default is :const:`True`. exclusive (bool): Exclusive queues may only be consumed from by the current connection. Setting the 'exclusive' flag always implies 'auto-delete'. Default is :const:`False`. 
auto_delete (bool): If set, the queue is deleted when all consumers have finished using it. Last consumer can be canceled either explicitly or because its channel is closed. If there was no consumer ever on the queue, it won't be deleted. expires (float): Set the expiry time (in seconds) for when this queue should expire. The expiry time decides how long the queue can stay unused before it's automatically deleted. *Unused* means the queue has no consumers, the queue has not been redeclared, and ``Queue.get`` has not been invoked for a duration of at least the expiration period. See https://www.rabbitmq.com/ttl.html#queue-ttl **RabbitMQ extension**: Only available when using RabbitMQ. message_ttl (float): Message time to live in seconds. This setting controls how long messages can stay in the queue unconsumed. If the expiry time passes before a message consumer has received the message, the message is deleted and no consumer will see the message. See https://www.rabbitmq.com/ttl.html#per-queue-message-ttl **RabbitMQ extension**: Only available when using RabbitMQ. max_length (int): Set the maximum number of messages that the queue can hold. If the number of messages in the queue exceeds this limit, new messages will be dropped (or dead-lettered if a dead letter exchange is active). See https://www.rabbitmq.com/maxlength.html **RabbitMQ extension**: Only available when using RabbitMQ. max_length_bytes (int): Set the max size (in bytes) for the total of messages in the queue. If the total size of all the messages in the queue exceeds this limit, new messages will be dropped (or dead-lettered if a dead letter exchange is active). **RabbitMQ extension**: Only available when using RabbitMQ. max_priority (int): Set the highest priority number for this queue. For example if the value is 10, then messages delivered to this queue can have a ``priority`` value between 0 and 10, where 10 is the highest priority. RabbitMQ queues without a max priority set will ignore the priority field in the message, so if you want priorities you need to set the max priority field to declare the queue as a priority queue. **RabbitMQ extension**: Only available when using RabbitMQ. queue_arguments (Dict): Additional arguments used when declaring the queue. Can be used to set the arguments value for RabbitMQ/AMQP's ``queue.declare``. binding_arguments (Dict): Additional arguments used when binding the queue. Can be used to set the arguments value for RabbitMQ/AMQP's ``queue.bind``. consumer_arguments (Dict): Additional arguments used when consuming from this queue. Can be used to set the arguments value for RabbitMQ/AMQP's ``basic.consume``. alias (str): Unused in Kombu, but applications can take advantage of this, for example to give alternate names to queues with automatically generated queue names. on_declared (Callable): Optional callback to be applied when the queue has been declared (the ``queue_declare`` operation is complete). This must be a function with a signature that accepts at least 3 positional arguments: ``(name, messages, consumers)``. no_declare (bool): Never declare this queue, nor related entities (:meth:`declare` does nothing). 
""" ContentDisallowed = ContentDisallowed name = '' exchange = Exchange('') routing_key = '' durable = True exclusive = False auto_delete = False no_ack = False attrs = ( ('name', None), ('exchange', None), ('routing_key', None), ('queue_arguments', None), ('binding_arguments', None), ('consumer_arguments', None), ('durable', bool), ('exclusive', bool), ('auto_delete', bool), ('no_ack', None), ('alias', None), ('bindings', list), ('no_declare', bool), ('expires', float), ('message_ttl', float), ('max_length', int), ('max_length_bytes', int), ('max_priority', int) ) def __init__(self, name='', exchange=None, routing_key='', channel=None, bindings=None, on_declared=None, **kwargs): super(Queue, self).__init__(**kwargs) self.name = name or self.name self.exchange = exchange or self.exchange self.routing_key = routing_key or self.routing_key self.bindings = set(bindings or []) self.on_declared = on_declared # allows Queue('name', [binding(...), binding(...), ...]) if isinstance(exchange, (list, tuple, set)): self.bindings |= set(exchange) if self.bindings: self.exchange = None # exclusive implies auto-delete. if self.exclusive: self.auto_delete = True self.maybe_bind(channel) def bind(self, channel): on_declared = self.on_declared bound = super(Queue, self).bind(channel) bound.on_declared = on_declared return bound def __hash__(self): return hash('Q|%s' % (self.name,)) def when_bound(self): if self.exchange: self.exchange = self.exchange(self.channel) def declare(self, nowait=False, channel=None): """Declare queue and exchange then binds queue to exchange.""" if not self.no_declare: # - declare main binding. self._create_exchange(nowait=nowait, channel=channel) self._create_queue(nowait=nowait, channel=channel) self._create_bindings(nowait=nowait, channel=channel) return self.name def _create_exchange(self, nowait=False, channel=None): if self.exchange: self.exchange.declare(nowait=nowait, channel=channel) def _create_queue(self, nowait=False, channel=None): self.queue_declare(nowait=nowait, passive=False, channel=channel) if self.exchange and self.exchange.name: self.queue_bind(nowait=nowait, channel=channel) def _create_bindings(self, nowait=False, channel=None): for B in self.bindings: channel = channel or self.channel B.declare(channel) B.bind(self, nowait=nowait, channel=channel) def queue_declare(self, nowait=False, passive=False, channel=None): """Declare queue on the server. Arguments: nowait (bool): Do not wait for a reply. passive (bool): If set, the server will not create the queue. The client can use this to check whether a queue exists without modifying the server state. 
""" channel = channel or self.channel queue_arguments = channel.prepare_queue_arguments( self.queue_arguments or {}, expires=self.expires, message_ttl=self.message_ttl, max_length=self.max_length, max_length_bytes=self.max_length_bytes, max_priority=self.max_priority, ) ret = channel.queue_declare( queue=self.name, passive=passive, durable=self.durable, exclusive=self.exclusive, auto_delete=self.auto_delete, arguments=queue_arguments, nowait=nowait, ) if not self.name: self.name = ret[0] if self.on_declared: self.on_declared(*ret) return ret def queue_bind(self, nowait=False, channel=None): """Create the queue binding on the server.""" return self.bind_to(self.exchange, self.routing_key, self.binding_arguments, channel=channel, nowait=nowait) def bind_to(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None): if isinstance(exchange, Exchange): exchange = exchange.name return (channel or self.channel).queue_bind( queue=self.name, exchange=exchange, routing_key=routing_key, arguments=arguments, nowait=nowait, ) def get(self, no_ack=None, accept=None): """Poll the server for a new message. This method provides direct access to the messages in a queue using a synchronous dialogue, designed for specific types of applications where synchronous functionality is more important than performance. Returns: ~kombu.Message: if a message was available, or :const:`None` otherwise. Arguments: no_ack (bool): If enabled the broker will automatically ack messages. accept (Set[str]): Custom list of accepted content types. """ no_ack = self.no_ack if no_ack is None else no_ack message = self.channel.basic_get(queue=self.name, no_ack=no_ack) if message is not None: m2p = getattr(self.channel, 'message_to_python', None) if m2p: message = m2p(message) if message.errors: message._reraise_error() message.accept = prepare_accept_content(accept) return message def purge(self, nowait=False): """Remove all ready messages from the queue.""" return self.channel.queue_purge(queue=self.name, nowait=nowait) or 0 def consume(self, consumer_tag='', callback=None, no_ack=None, nowait=False): """Start a queue consumer. Consumers last as long as the channel they were created on, or until the client cancels them. Arguments: consumer_tag (str): Unique identifier for the consumer. The consumer tag is local to a connection, so two clients can use the same consumer tags. If this field is empty the server will generate a unique tag. no_ack (bool): If enabled the broker will automatically ack messages. nowait (bool): Do not wait for a reply. callback (Callable): callback called for each delivered message. """ if no_ack is None: no_ack = self.no_ack return self.channel.basic_consume( queue=self.name, no_ack=no_ack, consumer_tag=consumer_tag or '', callback=callback, nowait=nowait, arguments=self.consumer_arguments) def cancel(self, consumer_tag): """Cancel a consumer by consumer tag.""" return self.channel.basic_cancel(consumer_tag) def delete(self, if_unused=False, if_empty=False, nowait=False): """Delete the queue. Arguments: if_unused (bool): If set, the server will only delete the queue if it has no consumers. A channel error will be raised if the queue has consumers. if_empty (bool): If set, the server will only delete the queue if it is empty. If it is not empty a channel error will be raised. nowait (bool): Do not wait for a reply. 
""" return self.channel.queue_delete(queue=self.name, if_unused=if_unused, if_empty=if_empty, nowait=nowait) def queue_unbind(self, arguments=None, nowait=False, channel=None): return self.unbind_from(self.exchange, self.routing_key, arguments, nowait, channel) def unbind_from(self, exchange='', routing_key='', arguments=None, nowait=False, channel=None): """Unbind queue by deleting the binding from the server.""" return (channel or self.channel).queue_unbind( queue=self.name, exchange=exchange.name, routing_key=routing_key, arguments=arguments, nowait=nowait, ) def __eq__(self, other): if isinstance(other, Queue): return (self.name == other.name and self.exchange == other.exchange and self.routing_key == other.routing_key and self.queue_arguments == other.queue_arguments and self.binding_arguments == other.binding_arguments and self.consumer_arguments == other.consumer_arguments and self.durable == other.durable and self.exclusive == other.exclusive and self.auto_delete == other.auto_delete) return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __repr__(self): if self.bindings: return self._repr_entity('Queue {name} -> {bindings}'.format( name=_reprstr(self.name), bindings=pretty_bindings(self.bindings), )) return self._repr_entity( 'Queue {name} -> {0.exchange!r} -> {routing_key}'.format( self, name=_reprstr(self.name), routing_key=_reprstr(self.routing_key), ), ) @property def can_cache_declaration(self): return not self.auto_delete @classmethod def from_dict(cls, queue, **options): binding_key = options.get('binding_key') or options.get('routing_key') e_durable = options.get('exchange_durable') if e_durable is None: e_durable = options.get('durable') e_auto_delete = options.get('exchange_auto_delete') if e_auto_delete is None: e_auto_delete = options.get('auto_delete') q_durable = options.get('queue_durable') if q_durable is None: q_durable = options.get('durable') q_auto_delete = options.get('queue_auto_delete') if q_auto_delete is None: q_auto_delete = options.get('auto_delete') e_arguments = options.get('exchange_arguments') q_arguments = options.get('queue_arguments') b_arguments = options.get('binding_arguments') c_arguments = options.get('consumer_arguments') bindings = options.get('bindings') exchange = Exchange(options.get('exchange'), type=options.get('exchange_type'), delivery_mode=options.get('delivery_mode'), routing_key=options.get('routing_key'), durable=e_durable, auto_delete=e_auto_delete, arguments=e_arguments) return Queue(queue, exchange=exchange, routing_key=binding_key, durable=q_durable, exclusive=options.get('exclusive'), auto_delete=q_auto_delete, no_ack=options.get('no_ack'), queue_arguments=q_arguments, binding_arguments=b_arguments, consumer_arguments=c_arguments, bindings=bindings) def as_dict(self, recurse=False): res = super(Queue, self).as_dict(recurse) if not recurse: return res bindings = res.get('bindings') if bindings: res['bindings'] = [b.as_dict(recurse=True) for b in bindings] return res kombu-4.1.0/extra/0000755000175000017500000000000013134154263013704 5ustar omeromer00000000000000kombu-4.1.0/extra/requirements/0000755000175000017500000000000013134154263016427 5ustar omeromer00000000000000kombu-4.1.0/extra/requirements/default.txt0000644000175000017500000000002113130603207020576 0ustar omeromer00000000000000amqp>=2.1.4,<3.0 kombu-4.1.0/extra/requirements/test-ci.txt0000644000175000017500000000012213130603207020524 0ustar omeromer00000000000000pytest-cov codecov redis PyYAML msgpack-python>0.2.0 -r extras/sqs.txt 

kombu-4.1.0/extra/appveyor/install.ps1
# Sample script to install Python and pip under Windows
# Authors: Olivier Grisel and Kyle Kastner
# License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/

$BASE_URL = "https://www.python.org/ftp/python/"
$GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
$GET_PIP_PATH = "C:\get-pip.py"


function DownloadPython ($python_version, $platform_suffix) {
    $webclient = New-Object System.Net.WebClient
    $filename = "python-" + $python_version + $platform_suffix + ".msi"
    $url = $BASE_URL + $python_version + "/" + $filename

    $basedir = $pwd.Path + "\"
    $filepath = $basedir + $filename
    if (Test-Path $filename) {
        Write-Host "Reusing" $filepath
        return $filepath
    }

    # Download and retry up to 3 times in case of network transient errors.
    Write-Host "Downloading" $filename "from" $url
    $retry_attempts = 3
    for($i=0; $i -lt $retry_attempts; $i++){
        try {
            $webclient.DownloadFile($url, $filepath)
            break
        }
        Catch [Exception]{
            Start-Sleep 1
        }
    }
    Write-Host "File saved at" $filepath
    return $filepath
}


function InstallPython ($python_version, $architecture, $python_home) {
    Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home
    if (Test-Path $python_home) {
        Write-Host $python_home "already exists, skipping."
        return $false
    }
    if ($architecture -eq "32") {
        $platform_suffix = ""
    } else {
        $platform_suffix = ".amd64"
    }
    $filepath = DownloadPython $python_version $platform_suffix
    Write-Host "Installing" $filepath "to" $python_home
    $args = "/qn /i $filepath TARGETDIR=$python_home"
    Write-Host "msiexec.exe" $args
    Start-Process -FilePath "msiexec.exe" -ArgumentList $args -Wait -Passthru
    Write-Host "Python $python_version ($architecture) installation complete"
    return $true
}


function InstallPip ($python_home) {
    $pip_path = $python_home + "/Scripts/pip.exe"
    $python_path = $python_home + "/python.exe"
    if (-not(Test-Path $pip_path)) {
        Write-Host "Installing pip..."
        $webclient = New-Object System.Net.WebClient
        $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH)
        Write-Host "Executing:" $python_path $GET_PIP_PATH
        Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru
    } else {
        Write-Host "pip already installed."
    }
}


function InstallPackage ($python_home, $pkg) {
    $pip_path = $python_home + "/Scripts/pip.exe"
    & $pip_path install $pkg
}


function main () {
    InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON
    InstallPip $env:PYTHON
    InstallPackage $env:PYTHON wheel
}

main

kombu-4.1.0/extra/appveyor/run_with_compiler.cmd
:: To build extensions for 64 bit Python 3, we need to configure environment
:: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of:
:: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1)
::
:: To build extensions for 64 bit Python 2, we need to configure environment
:: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of:
:: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0)
::
:: 32 bit builds do not require specific environment configurations.
::
:: Note: this script needs to be run with the /E:ON and /V:ON flags for the
:: cmd interpreter, at least for (SDK v7.0)
::
:: More details at:
:: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows
:: http://stackoverflow.com/a/13751649/163740
::
:: Author: Olivier Grisel
:: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/
@ECHO OFF

SET COMMAND_TO_RUN=%*
SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows

SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%"
IF %MAJOR_PYTHON_VERSION% == "2" (
    SET WINDOWS_SDK_VERSION="v7.0"
) ELSE IF %MAJOR_PYTHON_VERSION% == "3" (
    SET WINDOWS_SDK_VERSION="v7.1"
) ELSE (
    ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%"
    EXIT 1
)

IF "%PYTHON_ARCH%"=="64" (
    ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture
    SET DISTUTILS_USE_SDK=1
    SET MSSdk=1
    "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION%
    "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release
    ECHO Executing: %COMMAND_TO_RUN%
    call %COMMAND_TO_RUN% || EXIT 1
) ELSE (
    ECHO Using default MSVC build environment for 32 bit architecture
    ECHO Executing: %COMMAND_TO_RUN%
    call %COMMAND_TO_RUN% || EXIT 1
)
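The comment block above implies a wrapper-style invocation. A common AppVeyor pattern (illustrative — this exact configuration is not part of this archive) defines a ``CMD_IN_ENV`` variable and prefixes each build command with it, which supplies the ``/E:ON`` and ``/V:ON`` flags the script requires::

    CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\extra\\appveyor\\run_with_compiler.cmd"
    ...
    %CMD_IN_ENV% python setup.py build

``PYTHON_VERSION`` and ``PYTHON_ARCH`` must already be set in the environment, as the script branches on both.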

kombu-4.1.0/examples/simple_task_queue/client.py
from __future__ import absolute_import, unicode_literals

from kombu.pools import producers

from .queues import task_exchange

priority_to_routing_key = {
    'high': 'hipri',
    'mid': 'midpri',
    'low': 'lopri',
}


def send_as_task(connection, fun, args=(), kwargs={}, priority='mid'):
    payload = {'fun': fun, 'args': args, 'kwargs': kwargs}
    routing_key = priority_to_routing_key[priority]

    with producers[connection].acquire(block=True) as producer:
        producer.publish(payload,
                         serializer='pickle',
                         compression='bzip2',
                         exchange=task_exchange,
                         declare=[task_exchange],
                         routing_key=routing_key)

if __name__ == '__main__':
    from kombu import Connection
    from .tasks import hello_task

    connection = Connection('amqp://guest:guest@localhost:5672//')
    send_as_task(connection, fun=hello_task, args=('Kombu',), kwargs={},
                 priority='high')

kombu-4.1.0/examples/simple_task_queue/worker.py
from __future__ import absolute_import, unicode_literals

from kombu.mixins import ConsumerMixin
from kombu.log import get_logger
from kombu.utils.functional import reprcall

from .queues import task_queues

logger = get_logger(__name__)


class Worker(ConsumerMixin):

    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer(queues=task_queues,
                         accept=['pickle', 'json'],
                         callbacks=[self.process_task])]

    def process_task(self, body, message):
        fun = body['fun']
        args = body['args']
        kwargs = body['kwargs']
        logger.info('Got task: %s', reprcall(fun.__name__, args, kwargs))
        try:
            fun(*args, **kwargs)
        except Exception as exc:
            logger.error('task raised exception: %r', exc)
        message.ack()

if __name__ == '__main__':
    from kombu import Connection
    from kombu.utils.debug import setup_logging
    # setup root logger
    setup_logging(loglevel='INFO', loggers=[''])

    with Connection('amqp://guest:guest@localhost:5672//') as conn:
        try:
            worker = Worker(conn)
            worker.run()
        except KeyboardInterrupt:
            print('bye bye')
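A small sketch of driving this task queue from another module — it assumes a broker on localhost and that the examples directory is on the Python path so ``simple_task_queue`` imports as a package (the relative imports in client.py require that):

from kombu import Connection

from simple_task_queue.client import send_as_task
from simple_task_queue.tasks import hello_task

with Connection('amqp://guest:guest@localhost:5672//') as conn:
    # priority='low' routes to the 'lopri' queue declared in queues.py.
    send_as_task(conn, fun=hello_task, args=('world',), kwargs={},
                 priority='low')

Run worker.py in one shell and this snippet in another; the worker's consumer covers all three priority queues, so the task is picked up regardless of which routing key was used.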

kombu-4.1.0/examples/simple_task_queue/__init__.py

kombu-4.1.0/examples/simple_task_queue/queues.py
from __future__ import absolute_import, unicode_literals

from kombu import Exchange, Queue

task_exchange = Exchange('tasks', type='direct')
task_queues = [Queue('hipri', task_exchange, routing_key='hipri'),
               Queue('midpri', task_exchange, routing_key='midpri'),
               Queue('lopri', task_exchange, routing_key='lopri')]

kombu-4.1.0/examples/simple_task_queue/tasks.py
from __future__ import absolute_import, unicode_literals


def hello_task(who='world'):
    print('Hello {0}'.format(who))

kombu-4.1.0/examples/memory_transport.py
"""
Example that uses the in-memory transport to produce and consume a message.
"""

import time

from kombu import Connection, Exchange, Queue, Consumer

media_exchange = Exchange('media', 'direct')
video_queue = Queue('video', exchange=media_exchange, routing_key='video')
task_queues = [video_queue]


def handle_message(body, message):
    print("%s RECEIVED MESSAGE: %r" % (time.time(), body))
    message.ack()

connection = Connection("memory:///")
consumer = Consumer(connection, task_queues, callbacks=[handle_message])

producer = connection.Producer(serializer='json')
producer.publish({"foo": "bar"},
                 exchange=media_exchange, routing_key='video',
                 declare=task_queues)
consumer.consume()
connection.drain_events()

kombu-4.1.0/examples/simple_send.py
"""
Example that sends a single message and exits using the simple interface.

You can use `simple_receive.py` (or `complete_receive.py`) to receive the
message sent.
"""
from __future__ import absolute_import, unicode_literals

from kombu import Connection

#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as conn:

    #: SimpleQueue mimics the interface of the Python Queue module.
    #: First argument can either be a queue name or a kombu.Queue object.
    #: If a name, then the queue will be declared with the name as the queue
    #: name, exchange name and routing key.
    with conn.SimpleQueue('kombu_demo') as queue:
        queue.put({'hello': 'world'}, serializer='json', compression='zlib')

#####
# If you don't use the with statement, you must always
# remember to close objects.
#   queue.close()
#   connection.close()

kombu-4.1.0/examples/complete_receive.py
"""
Example of a simple consumer that waits for a single message, acknowledges
it and exits.
"""
from __future__ import absolute_import, unicode_literals, print_function

from pprint import pformat

from kombu import Connection, Exchange, Queue, Consumer, eventloop

#: By default messages sent to exchanges are persistent (delivery_mode=2),
#: and queues and exchanges are durable.
exchange = Exchange('kombu_demo', type='direct')
queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')


def pretty(obj):
    return pformat(obj, indent=4)


#: This is the callback applied when a message is received.
def handle_message(body, message):
    print('Received message: {0!r}'.format(body))
    print('  properties:\n{0}'.format(pretty(message.properties)))
    print('  delivery_info:\n{0}'.format(pretty(message.delivery_info)))
    message.ack()

#: Create a connection and a channel.
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as connection:

    #: Create consumer using our callback and queue.
    #: Second argument can also be a list to consume from
    #: any number of queues.
    with Consumer(connection, queue, callbacks=[handle_message]):

        #: Each iteration waits for a single event.  Note that this
        #: event may not be a message, or a message that is to be
        #: delivered to the consumers channel, but any event received
        #: on the connection.
        for _ in eventloop(connection):
            pass
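If you only want to wait a bounded amount of time for that single message, ``eventloop`` also accepts a timeout; a small variation on the loop above (the one-second value is arbitrary):

import socket

from kombu import Connection, eventloop

with Connection('amqp://guest:guest@localhost:5672//') as connection:
    try:
        # drain_events raises socket.timeout when nothing arrives in time
        for _ in eventloop(connection, timeout=1, ignore_timeouts=False):
            pass
    except socket.timeout:
        print('no message arrived within one second')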

kombu-4.1.0/examples/hello_publisher.py
from __future__ import absolute_import, unicode_literals

import datetime

from kombu import Connection


with Connection('amqp://guest:guest@localhost:5672//') as conn:
    simple_queue = conn.SimpleQueue('simple_queue')
    message = 'helloworld, sent at {0}'.format(datetime.datetime.today())
    simple_queue.put(message)
    print('Sent: {0}'.format(message))
    simple_queue.close()

kombu-4.1.0/examples/simple_eventlet_send.py
"""
Example that sends a single message and exits using the simple interface.

You can use `simple_receive.py` (or `complete_receive.py`) to receive the
message sent.
"""
from __future__ import absolute_import, unicode_literals

import eventlet

from kombu import Connection

eventlet.monkey_patch()


def send_many(n):

    #: Create connection
    #: If hostname, userid, password and virtual_host is not specified
    #: the values below are the default, but listed here so it can
    #: be easily changed.
    with Connection('amqp://guest:guest@localhost:5672//') as connection:

        #: SimpleQueue mimics the interface of the Python Queue module.
        #: First argument can either be a queue name or a kombu.Queue object.
        #: If a name, then the queue will be declared with the name as the
        #: queue name, exchange name and routing key.
        with connection.SimpleQueue('kombu_demo') as queue:

            def send_message(i):
                queue.put({'hello': 'world%s' % (i,)})

            pool = eventlet.GreenPool(10)
            for i in range(n):
                pool.spawn(send_message, i)
            pool.waitall()

if __name__ == '__main__':
    send_many(10)

kombu-4.1.0/examples/complete_send.py
"""
Example producer that sends a single message and exits.

You can use `complete_receive.py` to receive the message sent.
"""
from __future__ import absolute_import, unicode_literals

from kombu import Connection, Producer, Exchange, Queue

#: By default messages sent to exchanges are persistent (delivery_mode=2),
#: and queues and exchanges are durable.
exchange = Exchange('kombu_demo', type='direct')
queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')

with Connection('amqp://guest:guest@localhost:5672//') as connection:

    #: Producers are used to publish messages.
    #: A default exchange and routing key can also be specified
    #: as arguments to the Producer, but we rather specify this explicitly
    #: at the publish call.
    producer = Producer(connection)

    #: Publish the message using the json serializer (which is the default),
    #: and zlib compression.  The kombu consumer will automatically detect
    #: encoding, serialization and compression used and decode accordingly.
    producer.publish({'hello': 'world'},
                     exchange=exchange,
                     routing_key='kombu_demo',
                     serializer='json', compression='zlib')
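The publish call above can also be made resilient against connection loss. A sketch of the same call with declaration and retries enabled — the retry_policy values here are illustrative, not defaults from the example:

    producer.publish(
        {'hello': 'world'},
        exchange=exchange,
        routing_key='kombu_demo',
        serializer='json',
        compression='zlib',
        declare=[queue],                  # ensure exchange/queue/binding exist
        retry=True,                       # retry on recoverable channel errors
        retry_policy={'max_retries': 3},  # give up after three attempts
    )

``declare`` saves a separate ``queue.declare()`` step, at the cost of a declaration round-trip on first publish per channel.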

kombu-4.1.0/examples/hello_consumer.py
from __future__ import absolute_import, unicode_literals, print_function

from kombu import Connection  # noqa


with Connection('amqp://guest:guest@localhost:5672//') as conn:
    simple_queue = conn.SimpleQueue('simple_queue')
    message = simple_queue.get(block=True, timeout=1)
    print('Received: {0}'.format(message.payload))
    message.ack()
    simple_queue.close()

kombu-4.1.0/examples/rpc-tut6/rpc_server.py
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals

from kombu import Connection, Queue
from kombu.mixins import ConsumerProducerMixin

rpc_queue = Queue('rpc_queue')


def fib(n):
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return fib(n - 1) + fib(n - 2)


class Worker(ConsumerProducerMixin):

    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer(
            queues=[rpc_queue],
            on_message=self.on_request,
            accept={'application/json'},
            prefetch_count=1,
        )]

    def on_request(self, message):
        n = message.payload['n']
        print(' [.] fib({0})'.format(n))
        result = fib(n)

        self.producer.publish(
            {'result': result},
            exchange='', routing_key=message.properties['reply_to'],
            correlation_id=message.properties['correlation_id'],
            serializer='json',
            retry=True,
        )
        message.ack()


def start_worker(broker_url):
    connection = Connection(broker_url)
    print(' [x] Awaiting RPC requests')
    worker = Worker(connection)
    worker.run()


if __name__ == '__main__':
    try:
        start_worker('pyamqp://')
    except KeyboardInterrupt:
        pass

kombu-4.1.0/examples/rpc-tut6/rpc_client.py
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals

from kombu import Connection, Producer, Consumer, Queue, uuid


class FibonacciRpcClient(object):

    def __init__(self, connection):
        self.connection = connection
        self.callback_queue = Queue(uuid(), exclusive=True, auto_delete=True)

    def on_response(self, message):
        if message.properties['correlation_id'] == self.correlation_id:
            self.response = message.payload['result']

    def call(self, n):
        self.response = None
        self.correlation_id = uuid()
        with Producer(self.connection) as producer:
            producer.publish(
                {'n': n},
                exchange='',
                routing_key='rpc_queue',
                declare=[self.callback_queue],
                reply_to=self.callback_queue.name,
                correlation_id=self.correlation_id,
            )
        with Consumer(self.connection,
                      on_message=self.on_response,
                      queues=[self.callback_queue], no_ack=True):
            while self.response is None:
                self.connection.drain_events()
        return self.response


def main(broker_url):
    connection = Connection(broker_url)
    fibonacci_rpc = FibonacciRpcClient(connection)
    print(' [x] Requesting fib(30)')
    response = fibonacci_rpc.call(30)
    print(' [.] Got {0!r}'.format(response))


if __name__ == '__main__':
    main('pyamqp://')

kombu-4.1.0/examples/simple_eventlet_receive.py
"""
Example that receives messages using the simple interface.

You can use `simple_send.py` (or `complete_send.py`) to send the
messages this example waits for.
"""
from __future__ import absolute_import, unicode_literals

import eventlet

from kombu import Connection

eventlet.monkey_patch()


def wait_many(timeout=1):

    #: Create connection
    #: If hostname, userid, password and virtual_host is not specified
    #: the values below are the default, but listed here so it can
    #: be easily changed.
    with Connection('amqp://guest:guest@localhost:5672//') as connection:

        #: SimpleQueue mimics the interface of the Python Queue module.
        #: First argument can either be a queue name or a kombu.Queue object.
        #: If a name, then the queue will be declared with the name as the
        #: queue name, exchange name and routing key.
        with connection.SimpleQueue('kombu_demo') as queue:

            while True:
                try:
                    message = queue.get(block=False, timeout=timeout)
                except queue.Empty:
                    break
                else:
                    message.ack()
                    print(message.payload)

eventlet.spawn(wait_many).wait()
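To try the RPC tutorial end to end you need a running broker and both scripts above. A minimal driver — assuming a default RabbitMQ on localhost and that it is run from the rpc-tut6 directory, with the worker in a daemon thread purely for demonstration:

import threading

from kombu import Connection

from rpc_server import start_worker
from rpc_client import FibonacciRpcClient

# Run the worker in the background so this process can also act as client.
t = threading.Thread(target=start_worker, args=('pyamqp://',))
t.daemon = True
t.start()

client = FibonacciRpcClient(Connection('pyamqp://'))
print(client.call(10))  # -> 55

In production the worker would of course be a separate process, as in the tutorial's own __main__ blocks.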

kombu-4.1.0/examples/experimental/async_consume.py
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals

from kombu import Connection, Exchange, Queue, Producer, Consumer
from kombu.async import Hub

hub = Hub()
exchange = Exchange('asynt')
queue = Queue('asynt', exchange, 'asynt')


def send_message(conn):
    producer = Producer(conn)
    producer.publish('hello world', exchange=exchange, routing_key='asynt')
    print('message sent')


def on_message(message):
    print('received: {0!r}'.format(message.body))
    message.ack()
    hub.stop()  # <-- exit after one message

if __name__ == '__main__':
    conn = Connection('amqp://')
    conn.register_with_event_loop(hub)

    with Consumer(conn, [queue], on_message=on_message):
        send_message(conn)
        hub.run_forever()

kombu-4.1.0/examples/simple_receive.py
"""
Example receiving a message using the SimpleQueue interface.
"""
from __future__ import absolute_import, unicode_literals

from kombu import Connection

#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as conn:

    #: SimpleQueue mimics the interface of the Python Queue module.
    #: First argument can either be a queue name or a kombu.Queue object.
    #: If a name, then the queue will be declared with the name as the queue
    #: name, exchange name and routing key.
    with conn.SimpleQueue('kombu_demo') as queue:
        message = queue.get(block=True, timeout=10)
        message.ack()
        print(message.payload)

####
# If you don't use the with statement then you must always
# remember to close objects after use:
#   queue.close()
#   connection.close()

kombu-4.1.0/INSTALL
Installation
============

You can install ``kombu`` either via the Python Package Index (PyPI)
or from source.

To install using ``pip``::

    $ pip install kombu

To install using ``easy_install``::

    $ easy_install kombu

If you have downloaded a source tarball you can install it
by doing the following::

    $ python setup.py build
    # python setup.py install # as root
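After installing, a quick interpreter session confirms the package is importable and that a connection can be made without any broker, using the in-memory transport (the version string shown is this release's):

>>> import kombu
>>> kombu.__version__
'4.1.0'
>>> from kombu import Connection
>>> Connection('memory://').connect()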