pyngus-2.2.1/0000775003420400342040000000000013107111745014223 5ustar kgiustikgiusti00000000000000pyngus-2.2.1/pyngus/0000775003420400342040000000000013107111745015550 5ustar kgiustikgiusti00000000000000pyngus-2.2.1/pyngus/link.py0000664003420400342040000010337713024050133017061 0ustar kgiustikgiusti00000000000000# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Public API of this module: sender/receiver links and their event-handler
# base classes.
__all__ = ["SenderEventHandler",
           "SenderLink",
           "ReceiverEventHandler",
           "ReceiverLink"]

import collections
import logging

import proton

from pyngus.endpoint import Endpoint

LOG = logging.getLogger(__name__)

# Proton changed its event model after 0.7; several classes below select an
# implementation based on this version tuple at class-definition time.
_PROTON_VERSION = (int(getattr(proton, "VERSION_MAJOR", 0)),
                   int(getattr(proton, "VERSION_MINOR", 0)))

# map property names to proton values:
_dist_modes = {"copy": proton.Terminus.DIST_MODE_COPY,
               "move": proton.Terminus.DIST_MODE_MOVE}
_snd_settle_modes = {"settled": proton.Link.SND_SETTLED,
                     "unsettled": proton.Link.SND_UNSETTLED,
                     "mixed": proton.Link.SND_MIXED}
_rcv_settle_modes = {"first": proton.Link.RCV_FIRST,
                     "second": proton.Link.RCV_SECOND}


# TODO(kgiusti): this is duplicated in connection.py, put in common file
class _CallbackLock(object):
    """A utility class for detecting when a callback invokes a
    non-reentrant Pyngus method.
""" def __init__(self, link): super(_CallbackLock, self).__init__() self._link = link self.in_callback = 0 def __enter__(self): # manually lock parent - can't enter its non-reentrant methods self._link._connection._callback_lock.__enter__() self.in_callback += 1 return self def __exit__(self, exc_type, exc_val, exc_tb): self.in_callback -= 1 self._link._connection._callback_lock.__exit__(None, None, None) # if a call is made to a non-reentrant method while this context is # held, then the method will raise a RuntimeError(). Return false to # propagate the exception to the caller return False def _not_reentrant(func): """Decorator that prevents callbacks from calling into link methods that are not reentrant """ def wrap(*args, **kws): link = args[0] if link._callback_lock.in_callback: m = "Link %s cannot be invoked from a callback!" % func raise RuntimeError(m) return func(*args, **kws) return wrap class _Link(Endpoint): """A generic Link base class.""" def __init__(self, connection, pn_link): super(_Link, self).__init__(pn_link.name) self._connection = connection self._handler = None self._properties = None self._user_context = None self._rejected = False # requested link was refused self._failed = False # protocol error occurred self._callback_lock = _CallbackLock(self) # TODO(kgiusti): raise jira to add 'context' attr to api self._pn_link = pn_link pn_link.context = self def configure(self, target_address, source_address, handler, properties): """Assign addresses, properties, etc.""" self._handler = handler self._properties = properties dynamic_props = None if properties: dynamic_props = properties.get("dynamic-node-properties") mode = _dist_modes.get(properties.get("distribution-mode")) if mode is not None: self._pn_link.source.distribution_mode = mode mode = _snd_settle_modes.get(properties.get("snd-settle-mode")) if mode is not None: self._pn_link.snd_settle_mode = mode mode = _rcv_settle_modes.get(properties.get("rcv-settle-mode")) if mode is not None: 
self._pn_link.rcv_settle_mode = mode if target_address is None: if not self._pn_link.is_sender: raise Exception("Dynamic target not allowed") self._pn_link.target.dynamic = True if dynamic_props: self._pn_link.target.properties.clear() self._pn_link.target.properties.put_dict(dynamic_props) elif target_address: self._pn_link.target.address = target_address if source_address is None: if not self._pn_link.is_receiver: raise Exception("Dynamic source not allowed") self._pn_link.source.dynamic = True if dynamic_props: self._pn_link.source.properties.clear() self._pn_link.source.properties.put_dict(dynamic_props) elif source_address: self._pn_link.source.address = source_address @property def name(self): return self._name @property def connection(self): return self._connection def open(self): if self._pn_link.state & proton.Endpoint.LOCAL_UNINIT: LOG.debug("Opening the link.") self._pn_link.open() def _get_user_context(self): return self._user_context def _set_user_context(self, ctxt): self._user_context = ctxt _uc_docstr = """Arbitrary application object associated with this link.""" user_context = property(_get_user_context, _set_user_context, doc=_uc_docstr) @property def source_address(self): """Return the authorative source of the link.""" # If link is a sender, source is determined by the local # value, else use the remote. if self._pn_link.is_sender: return self._pn_link.source.address else: return self._pn_link.remote_source.address @property def target_address(self): """Return the authorative target of the link.""" # If link is a receiver, target is determined by the local # value, else use the remote. 
if self._pn_link.is_receiver: return self._pn_link.target.address else: return self._pn_link.remote_target.address def close(self, pn_condition=None): if self._pn_link.state & proton.Endpoint.LOCAL_ACTIVE: LOG.debug("Closing the link.") if pn_condition: self._pn_link.condition = pn_condition self._pn_link.close() @property def active(self): state = self._pn_link.state return (not self._failed and state == (proton.Endpoint.LOCAL_ACTIVE | proton.Endpoint.REMOTE_ACTIVE)) @property def closed(self): state = self._pn_link.state return (self._failed or state == (proton.Endpoint.LOCAL_CLOSED | proton.Endpoint.REMOTE_CLOSED)) def reject(self, pn_condition): self._rejected = True # prevent 'active' callback! self._pn_link.open() if pn_condition: self._pn_link.condition = pn_condition self._pn_link.close() def destroy(self): LOG.debug("link destroyed %s", str(self._pn_link)) self._user_context = None self._connection = None self._handler = None self._callback_lock = None if self._pn_link: session = self._pn_link.session.context self._pn_link.context = None self._pn_link.free() self._pn_link = None session.link_destroyed(self) # destroy session _after_ link def _process_delivery(self, pn_delivery): raise NotImplementedError("Must Override") def _process_credit(self): raise NotImplementedError("Must Override") def _link_failed(self, error): raise NotImplementedError("Must Override") def _session_closed(self): """Remote has closed the session used by this link.""" # if link not already closed: if self._endpoint_state & proton.Endpoint.REMOTE_ACTIVE: # simulate close received self._process_remote_state() elif self._endpoint_state & proton.Endpoint.REMOTE_UNINIT: # locally created link, will never come up self._failed = True self._link_failed("Parent session closed.") # Proton's event model was changed after 0.7 if (_PROTON_VERSION >= (0, 8)): _endpoint_event_map = { proton.Event.LINK_REMOTE_OPEN: Endpoint.REMOTE_OPENED, proton.Event.LINK_REMOTE_CLOSE: Endpoint.REMOTE_CLOSED, 
proton.Event.LINK_LOCAL_OPEN: Endpoint.LOCAL_OPENED, proton.Event.LINK_LOCAL_CLOSE: Endpoint.LOCAL_CLOSED} @staticmethod def _handle_proton_event(pn_event, connection): etype = pn_event.type if etype == proton.Event.DELIVERY: pn_link = pn_event.link pn_link.context and \ pn_link.context._process_delivery(pn_event.delivery) return True if etype == proton.Event.LINK_FLOW: pn_link = pn_event.link pn_link.context and pn_link.context._process_credit() return True ep_event = _Link._endpoint_event_map.get(etype) if ep_event is not None: pn_link = pn_event.link pn_link.context and \ pn_link.context._process_endpoint_event(ep_event) return True if etype == proton.Event.LINK_INIT: pn_link = pn_event.link # create a new link if requested by remote: c = hasattr(pn_link, 'context') and pn_link.context if not c: session = pn_link.session.context if (pn_link.is_sender and pn_link.name not in connection._sender_links): LOG.debug("Remotely initiated Sender needs init") link = session.request_sender(pn_link) connection._sender_links[pn_link.name] = link elif (pn_link.is_receiver and pn_link.name not in connection._receiver_links): LOG.debug("Remotely initiated Receiver needs init") link = session.request_receiver(pn_link) connection._receiver_links[pn_link.name] = link return True if etype == proton.Event.LINK_FINAL: LOG.debug("link finalized: %s", pn_event.context) return True return False # event not handled elif hasattr(proton.Event, "LINK_REMOTE_STATE"): # 0.7 proton event model @staticmethod def _handle_proton_event(pn_event, connection): if pn_event.type == proton.Event.LINK_REMOTE_STATE: pn_link = pn_event.link # create a new link if requested by remote: c = hasattr(pn_link, 'context') and pn_link.context if ((not c) and (pn_link.state & proton.Endpoint.LOCAL_UNINIT)): session = pn_link.session.context if (pn_link.is_sender and pn_link.name not in connection._sender_links): LOG.debug("Remotely initiated Sender needs init") link = session.request_sender(pn_link) 
connection._sender_links[pn_link.name] = link elif (pn_link.is_receiver and pn_link.name not in connection._receiver_links): LOG.debug("Remotely initiated Receiver needs init") link = session.request_receiver(pn_link) connection._receiver_links[pn_link.name] = link pn_link.context._process_remote_state() return True elif pn_event.type == proton.Event.LINK_LOCAL_STATE: pn_link = pn_event.link pn_link.context._process_local_state() elif pn_event.type == proton.Event.LINK_FLOW: pn_link = pn_event.link pn_link.context._process_credit() elif pn_event.type == proton.Event.DELIVERY: pn_link = pn_event.link pn_delivery = pn_event.delivery pn_link.context._process_delivery(pn_delivery) else: return False # unknown return True # endpoint methods: @property def _endpoint_state(self): return self._pn_link.state def _ep_error(self, error): super(_Link, self)._ep_error(error) self._failed = True self._link_failed("Endpoint protocol error: %s" % error) def _get_remote_settle_modes(pn_link): """Return a map containing the settle modes as provided by the remote. Skip any default value. 
""" modes = {} snd = pn_link.remote_snd_settle_mode if snd == proton.Link.SND_UNSETTLED: modes['snd-settle-mode'] = 'unsettled' elif snd == proton.Link.SND_SETTLED: modes['snd-settle-mode'] = 'settled' if pn_link.remote_rcv_settle_mode == proton.Link.RCV_SECOND: modes['rcv-settle-mode'] = 'second' return modes class SenderEventHandler(object): def sender_active(self, sender_link): LOG.debug("sender_active (ignored)") def sender_remote_closed(self, sender_link, pn_condition): LOG.debug("sender_remote_closed condition=%s (ignored)", pn_condition) def sender_closed(self, sender_link): LOG.debug("sender_closed (ignored)") def credit_granted(self, sender_link): LOG.debug("credit_granted (ignored)") def sender_failed(self, sender_link, error): """Protocol error occurred.""" LOG.debug("sender_failed error=%s (ignored)", error) class SenderLink(_Link): # Status for message send callback # ABORTED = -2 TIMED_OUT = -1 UNKNOWN = 0 ACCEPTED = 1 REJECTED = 2 RELEASED = 3 MODIFIED = 4 _DISPOSITION_STATE_MAP = { proton.Disposition.ACCEPTED: ACCEPTED, proton.Disposition.REJECTED: REJECTED, proton.Disposition.RELEASED: RELEASED, proton.Disposition.MODIFIED: MODIFIED, } class _SendRequest(object): """Tracks sending a single message.""" def __init__(self, link, tag, message, callback, handle, deadline): self.link = link self.tag = tag self.message = message self.callback = callback self.handle = handle self.deadline = deadline self.link._send_requests[self.tag] = self if self.deadline: self.link._connection._add_timer(self.deadline, self) def __call__(self): """Invoked by Connection on timeout (now <= deadline).""" self.link._send_expired(self) def destroy(self, state, info): """Invoked on final completion of send.""" if self.deadline: self.link._connection._cancel_timer(self.deadline, self) if self.tag in self.link._send_requests: del self.link._send_requests[self.tag] if self.callback: with self.link._callback_lock: self.callback(self.link, self.handle, state, info) def 
__init__(self, connection, pn_link): super(SenderLink, self).__init__(connection, pn_link) self._send_requests = {} # indexed by tag self._pending_sends = collections.deque() # tags in order sent self._next_deadline = 0 self._next_tag = 0 self._last_credit = 0 # TODO(kgiusti) - think about send-settle-mode configuration def send(self, message, delivery_callback=None, handle=None, deadline=None): tag = "pyngus-tag-%s" % self._next_tag self._next_tag += 1 send_req = SenderLink._SendRequest(self, tag, message, delivery_callback, handle, deadline) self._pn_link.delivery(tag) pn_delivery = self._pn_link.current if pn_delivery and pn_delivery.writable: # send oldest pending: if self._pending_sends: self._pending_sends.append(tag) tag = self._pending_sends.popleft() send_req = self._send_requests[tag] self._write_msg(pn_delivery, send_req) else: LOG.debug("Send is pending for credit, tag=%s", tag) self._pending_sends.append(tag) return 0 @property def pending(self): return len(self._send_requests) @property def credit(self): return self._pn_link.credit def reject(self, pn_condition=None): """See Link Reject, AMQP1.0 spec.""" self._pn_link.source.type = proton.Terminus.UNSPECIFIED super(SenderLink, self).reject(pn_condition) @_not_reentrant def destroy(self): self._connection._remove_sender(self._name) self._connection = None super(SenderLink, self).destroy() def _process_delivery(self, pn_delivery): """Check if the delivery can be processed.""" if pn_delivery.tag in self._send_requests: if pn_delivery.settled or pn_delivery.remote_state: # remote has reached a 'terminal state' outcome = pn_delivery.remote_state state = SenderLink._DISPOSITION_STATE_MAP.get(outcome, self.UNKNOWN) pn_disposition = pn_delivery.remote info = {} if state == SenderLink.REJECTED: if pn_disposition.condition: info["condition"] = pn_disposition.condition elif state == SenderLink.MODIFIED: info["delivery-failed"] = pn_disposition.failed info["undeliverable-here"] = pn_disposition.undeliverable 
annotations = pn_disposition.annotations if annotations: info["message-annotations"] = annotations send_req = self._send_requests.pop(pn_delivery.tag) send_req.destroy(state, info) pn_delivery.settle() elif pn_delivery.writable: # we can now send on this delivery if self._pending_sends: tag = self._pending_sends.popleft() send_req = self._send_requests[tag] self._write_msg(pn_delivery, send_req) else: # tag no longer valid, expired or canceled send? LOG.debug("Delivery ignored, tag=%s", str(pn_delivery.tag)) pn_delivery.settle() def _process_credit(self): # check if any pending deliveries are now writable: pn_delivery = self._pn_link.current while (self._pending_sends and pn_delivery and pn_delivery.writable): self._process_delivery(pn_delivery) pn_delivery = self._pn_link.current # Alert if credit has become available if self._handler and not self._rejected: if 0 < self._pn_link.credit > self._last_credit: with self._callback_lock: self._handler.credit_granted(self) self._last_credit = self._pn_link.credit def _write_msg(self, pn_delivery, send_req): # given a writable delivery, send a message self._pn_link.send(send_req.message.encode()) self._pn_link.advance() self._last_credit = self._pn_link.credit if not send_req.callback: # no disposition callback, so we can discard the send request and # settle the delivery immediately send_req.destroy(SenderLink.UNKNOWN, {}) pn_delivery.settle() def _send_expired(self, send_req): LOG.debug("Send request timed-out, tag=%s", send_req.tag) try: self._pending_sends.remove(send_req.tag) except ValueError: pass send_req.destroy(SenderLink.TIMED_OUT, None) def _link_failed(self, error): if self._handler and not self._rejected: with self._callback_lock: self._handler.sender_failed(self, error) # endpoint state machine actions: def _ep_active(self): LOG.debug("SenderLink is up") if self._handler and not self._rejected: with self._callback_lock: self._handler.sender_active(self) def _ep_need_close(self): LOG.debug("SenderLink remote 
closed") if self._handler and not self._rejected: cond = self._pn_link.remote_condition with self._callback_lock: self._handler.sender_remote_closed(self, cond) def _ep_closed(self): LOG.debug("SenderLink close completed") # abort any pending sends self._pending_sends.clear() pn_condition = self._pn_link.condition info = {"condition": pn_condition} if pn_condition else None while self._send_requests: key, send_req = self._send_requests.popitem() send_req.destroy(SenderLink.ABORTED, info) if self._handler and not self._rejected: with self._callback_lock: self._handler.sender_closed(self) def _ep_requested(self): LOG.debug("Remote has requested a SenderLink") handler = self._connection._handler if handler: pn_link = self._pn_link props = _get_remote_settle_modes(pn_link) # has the remote requested a source address? req_source = "" if pn_link.remote_source.dynamic: req_source = None req_props = pn_link.remote_source.properties if req_props and req_props.next() == proton.Data.MAP: props["dynamic-node-properties"] = req_props.get_dict() elif pn_link.remote_source.address: req_source = pn_link.remote_source.address props["target-address"] = pn_link.remote_target.address dist_mode = pn_link.remote_source.distribution_mode if (dist_mode == proton.Terminus.DIST_MODE_COPY): props["distribution-mode"] = "copy" elif (dist_mode == proton.Terminus.DIST_MODE_MOVE): props["distribution-mode"] = "move" with self._connection._callback_lock: handler.sender_requested(self._connection, pn_link.name, # handle pn_link.name, req_source, props) class ReceiverEventHandler(object): def receiver_active(self, receiver_link): LOG.debug("receiver_active (ignored)") def receiver_remote_closed(self, receiver_link, pn_condition): LOG.debug("receiver_remote_closed condition=%s (ignored)", pn_condition) def receiver_closed(self, receiver_link): LOG.debug("receiver_closed (ignored)") def receiver_failed(self, receiver_link, error): """Protocol error occurred.""" LOG.debug("receiver_failed error=%s 
(ignored)", error) def message_received(self, receiver_link, message, handle): LOG.debug("message_received (ignored)") class ReceiverLink(_Link): def __init__(self, connection, pn_link): super(ReceiverLink, self).__init__(connection, pn_link) self._next_handle = 0 self._unsettled_deliveries = {} # indexed by handle # TODO(kgiusti) - think about receiver-settle-mode configuration @property def capacity(self): return self._pn_link.credit def add_capacity(self, amount): self._pn_link.flow(amount) def _settle_delivery(self, handle, state): pn_delivery = self._unsettled_deliveries.pop(handle, None) if pn_delivery is None: raise Exception("Invalid message handle: %s" % str(handle)) pn_delivery.update(state) pn_delivery.settle() def message_accepted(self, handle): self._settle_delivery(handle, proton.Delivery.ACCEPTED) def message_released(self, handle): self._settle_delivery(handle, proton.Delivery.RELEASED) def message_rejected(self, handle, pn_condition=None): pn_delivery = self._unsettled_deliveries.pop(handle, None) if pn_delivery is None: raise Exception("Invalid message handle: %s" % str(handle)) if pn_condition: pn_delivery.local.condition = pn_condition pn_delivery.update(proton.Delivery.REJECTED) pn_delivery.settle() def message_modified(self, handle, delivery_failed, undeliverable, annotations): pn_delivery = self._unsettled_deliveries.pop(handle, None) if pn_delivery is None: raise Exception("Invalid message handle: %s" % str(handle)) pn_delivery.local.failed = delivery_failed pn_delivery.local.undeliverable = undeliverable if annotations: pn_delivery.local.annotations = annotations pn_delivery.update(proton.Delivery.MODIFIED) pn_delivery.settle() def reject(self, pn_condition=None): """See Link Reject, AMQP1.0 spec.""" self._pn_link.target.type = proton.Terminus.UNSPECIFIED super(ReceiverLink, self).reject(pn_condition) @_not_reentrant def destroy(self): self._connection._remove_receiver(self._name) self._connection = None super(ReceiverLink, self).destroy() 
    def _process_delivery(self, pn_delivery):
        """Check if the delivery can be processed."""
        # wait until the complete message has arrived:
        if pn_delivery.readable and not pn_delivery.partial:
            data = self._pn_link.recv(pn_delivery.pending)
            msg = proton.Message()
            msg.decode(data)
            self._pn_link.advance()
            if self._handler:
                # hand the message to the application; it stays unsettled
                # until the app calls message_accepted/rejected/etc.
                handle = "rmsg-%s:%x" % (self._name, self._next_handle)
                self._next_handle += 1
                self._unsettled_deliveries[handle] = pn_delivery
                with self._callback_lock:
                    self._handler.message_received(self, msg, handle)
            else:
                # TODO(kgiusti): is it ok to assume Delivery.REJECTED?
                pn_delivery.settle()

    def _process_credit(self):
        # Only used by SenderLink
        pass

    def _link_failed(self, error):
        if self._handler and not self._rejected:
            with self._callback_lock:
                self._handler.receiver_failed(self, error)

    # endpoint state machine actions:

    def _ep_active(self):
        LOG.debug("ReceiverLink is up")
        if self._handler and not self._rejected:
            with self._callback_lock:
                self._handler.receiver_active(self)

    def _ep_need_close(self):
        LOG.debug("ReceiverLink remote closed")
        if self._handler and not self._rejected:
            cond = self._pn_link.remote_condition
            with self._callback_lock:
                self._handler.receiver_remote_closed(self, cond)

    def _ep_closed(self):
        LOG.debug("ReceiverLink close completed")
        if self._handler and not self._rejected:
            with self._callback_lock:
                self._handler.receiver_closed(self)

    def _ep_requested(self):
        LOG.debug("Remote has initiated a ReceiverLink")
        handler = self._connection._handler
        if handler:
            pn_link = self._pn_link
            props = _get_remote_settle_modes(pn_link)
            # has the remote requested a target address?
            req_target = ""
            if pn_link.remote_target.dynamic:
                # None flags a dynamic target request to the application
                req_target = None
                req_props = pn_link.remote_target.properties
                if req_props and req_props.next() == proton.Data.MAP:
                    props["dynamic-node-properties"] = req_props.get_dict()
            elif pn_link.remote_target.address:
                req_target = pn_link.remote_target.address

            props["source-address"] = pn_link.remote_source.address
            dist_mode = pn_link.remote_source.distribution_mode
            if (dist_mode == proton.Terminus.DIST_MODE_COPY):
                props["distribution-mode"] = "copy"
            elif (dist_mode == proton.Terminus.DIST_MODE_MOVE):
                props["distribution-mode"] = "move"

            with self._connection._callback_lock:
                handler.receiver_requested(self._connection,
                                           pn_link.name,  # handle
                                           pn_link.name,
                                           req_target,
                                           props)


class _SessionProxy(Endpoint):
    """Corresponds to a Proton Session object."""
    def __init__(self, name, connection, pn_session=None):
        super(_SessionProxy, self).__init__(name)
        # pn_session given --> session was initiated by the remote peer
        self._locally_initiated = not pn_session
        self._connection = connection
        if not pn_session:
            pn_session = connection._pn_connection.session()
        self._pn_session = pn_session
        self._links = set()
        # back-pointer so proton events can be routed to this wrapper:
        pn_session.context = self

    def open(self):
        if self._pn_session.state & proton.Endpoint.LOCAL_UNINIT:
            self._pn_session.open()

    def new_sender(self, name):
        """Create a new sender link."""
        pn_link = self._pn_session.sender(name)
        return self.request_sender(pn_link)

    def request_sender(self, pn_link):
        """Create link from request for a sender."""
        sl = SenderLink(self._connection, pn_link)
        self._links.add(sl)
        return sl

    def new_receiver(self, name):
        """Create a new receiver link."""
        pn_link = self._pn_session.receiver(name)
        return self.request_receiver(pn_link)

    def request_receiver(self, pn_link):
        """Create link from request for a receiver."""
        rl = ReceiverLink(self._connection, pn_link)
        self._links.add(rl)
        return rl

    def link_destroyed(self, link):
        """Link has been destroyed."""
        self._links.discard(link)
        if not self._links:
            # no more links
            LOG.debug("destroying unneeded session")
            self._pn_session.close()
            self._pn_session.free()
            self._pn_session = None
            self._connection = None

    # Proton's event model was changed after 0.7
    if (_PROTON_VERSION >= (0, 8)):
        # map proton session endpoint events to Endpoint state-machine events
        _endpoint_event_map = {
            proton.Event.SESSION_REMOTE_OPEN: Endpoint.REMOTE_OPENED,
            proton.Event.SESSION_REMOTE_CLOSE: Endpoint.REMOTE_CLOSED,
            proton.Event.SESSION_LOCAL_OPEN: Endpoint.LOCAL_OPENED,
            proton.Event.SESSION_LOCAL_CLOSE: Endpoint.LOCAL_CLOSED}

        @staticmethod
        def _handle_proton_event(pn_event, connection):
            """Dispatch a proton session event; return True if handled."""
            ep_event = _SessionProxy._endpoint_event_map.get(pn_event.type)
            if ep_event is not None:
                pn_session = pn_event.context
                pn_session.context._process_endpoint_event(ep_event)
            elif pn_event.type == proton.Event.SESSION_INIT:
                # create a new session if requested by remote:
                pn_session = pn_event.context
                c = hasattr(pn_session, 'context') and pn_session.context
                if not c:
                    LOG.debug("Opening remotely initiated session")
                    name = "session-%d" % connection._remote_session_id
                    connection._remote_session_id += 1
                    _SessionProxy(name, connection, pn_session)
            elif pn_event.type == proton.Event.SESSION_FINAL:
                LOG.debug("Session finalized: %s", pn_event.context)
            else:
                return False  # unknown
            return True  # handled
    elif hasattr(proton.Event, "SESSION_REMOTE_STATE"):
        # 0.7 proton event model
        @staticmethod
        def _handle_proton_event(pn_event, connection):
            """Dispatch a proton (0.7) session event; True if handled."""
            if pn_event.type == proton.Event.SESSION_REMOTE_STATE:
                pn_session = pn_event.session
                # create a new session if requested by remote:
                c = hasattr(pn_session, 'context') and pn_session.context
                if not c:
                    LOG.debug("Opening remotely initiated session")
                    name = "session-%d" % connection._remote_session_id
                    connection._remote_session_id += 1
                    _SessionProxy(name, connection, pn_session)
                pn_session.context._process_remote_state()
            elif pn_event.type == proton.Event.SESSION_LOCAL_STATE:
                pn_session = pn_event.session
                pn_session.context._process_local_state()
            else:
                return False  # unknown
            return True  # handled

    @property
    def _endpoint_state(self):
        return self._pn_session.state

    # endpoint state machine actions:
    # (endpoint state-machine actions, cont'd)

    def _ep_requested(self):
        """Peer has requested a new session."""
        LOG.debug("Session %s requested - opening...", self._name)
        self.open()

    def _ep_active(self):
        """Both ends of the Endpoint have become active."""
        LOG.debug("Session %s active", self._name)

    def _ep_need_close(self):
        """Peer has closed its end of the session."""
        LOG.debug("Session %s close requested - closing...", self._name)
        # notify each link; iterate over a copy since the callbacks
        links = self._links.copy()  # may modify _links
        for link in links:
            link._session_closed()

    def _ep_closed(self):
        """Both ends of the endpoint have closed."""
        LOG.debug("Session %s closed", self._name)
pyngus-2.2.1/pyngus/endpoint.py0000664003420400342040000001633212565142272017755 0ustar kgiustikgiusti00000000000000# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging import proton LOG = logging.getLogger(__name__) _PROTON_VERSION = (int(getattr(proton, "VERSION_MAJOR", 0)), int(getattr(proton, "VERSION_MINOR", 0))) class Endpoint(object): """AMQP Endpoint state machine.""" # Endpoint States: STATE_UNINIT = 0 # initial state STATE_PENDING = 1 # local opened, waiting for remote to open STATE_REQUESTED = 2 # remote opened, waiting for local to open STATE_CANCELLED = 3 # local closed before remote opened STATE_ABANDONED = 4 # remote closed before local opened STATE_ACTIVE = 5 STATE_NEED_CLOSE = 6 # remote closed, waiting for local close STATE_CLOSING = 7 # locally closed, pending remote close STATE_CLOSED = 8 # terminal state STATE_ERROR = 9 # unexpected state transition STATE_NAMES = ["STATE_UNINIT", "STATE_PENDING", "STATE_REQUESTED", "STATE_CANCELLED", "STATE_ABANDONED", "STATE_ACTIVE", "STATE_NEED_CLOSE", "STATE_CLOSING", "STATE_CLOSED", "STATE_ERROR"] # Events: # These correspond to endpoint events generated by the Proton Engine LOCAL_OPENED = 0 LOCAL_CLOSED = 1 REMOTE_OPENED = 2 REMOTE_CLOSED = 3 EVENT_NAMES = ["LOCAL_OPENED", "LOCAL_CLOSED", "REMOTE_OPENED", "REMOTE_CLOSED"] # Endpoint Finite State Machine: # Indexed by current state, each entry is indexed by the event received and # returns a tuple of (next-state, action). If there is no entry for a # given event, _ep_error() is invoked and the endpoint moves to the # terminal STATE_ERROR state. 
_FSM = {} _FSM[STATE_UNINIT] = { LOCAL_OPENED: (STATE_PENDING, None), REMOTE_OPENED: (STATE_REQUESTED, lambda s: s._ep_requested()) } _FSM[STATE_PENDING] = { LOCAL_CLOSED: (STATE_CANCELLED, None), REMOTE_OPENED: (STATE_ACTIVE, lambda s: s._ep_active()) } _FSM[STATE_REQUESTED] = { LOCAL_OPENED: (STATE_ACTIVE, lambda s: s._ep_active()), REMOTE_CLOSED: (STATE_ABANDONED, None) } _FSM[STATE_CANCELLED] = { REMOTE_OPENED: (STATE_CLOSING, None) } _FSM[STATE_ABANDONED] = { LOCAL_OPENED: (STATE_NEED_CLOSE, lambda s: s._ep_need_close()), LOCAL_CLOSED: (STATE_CLOSED, lambda s: s._ep_closed()) } _FSM[STATE_ACTIVE] = { LOCAL_CLOSED: (STATE_CLOSING, None), REMOTE_CLOSED: (STATE_NEED_CLOSE, lambda s: s._ep_need_close()) } _FSM[STATE_NEED_CLOSE] = { LOCAL_CLOSED: (STATE_CLOSED, lambda s: s._ep_closed()) } _FSM[STATE_CLOSING] = { REMOTE_CLOSED: (STATE_CLOSED, lambda s: s._ep_closed()) } _FSM[STATE_CLOSED] = { REMOTE_CLOSED: (STATE_CLOSED, None) } _FSM[STATE_ERROR] = { # terminal state LOCAL_OPENED: (STATE_ERROR, None), LOCAL_CLOSED: (STATE_ERROR, None), REMOTE_OPENED: (STATE_ERROR, None), REMOTE_CLOSED: (STATE_ERROR, None) } def __init__(self, name): self._name = name self._state = Endpoint.STATE_UNINIT if (_PROTON_VERSION < (0, 8)): # The old proton event model did not generate specific endpoint # events. Rather it simply indicated local or remote state change # occured without giving the value of the state (opened/closed). # Map these events to open and close events, assuming the Proton # endpoint state transitions are fixed to the following sequence: # UNINIT --> ACTIVE --> CLOSED self._remote_events = [Endpoint.REMOTE_OPENED, Endpoint.REMOTE_CLOSED] self._local_events = [Endpoint.LOCAL_OPENED, Endpoint.LOCAL_CLOSED] def _process_endpoint_event(self, event): """Called when the Proton Engine generates an endpoint state change event. 
""" LOG.debug("Endpoint %s event: %s", self._name, Endpoint.EVENT_NAMES[event]) state_fsm = Endpoint._FSM[self._state] entry = state_fsm.get(event) if not entry: # protocol error: invalid event for current state old_state = self._state self._state = Endpoint.STATE_ERROR self._ep_error("invalid event=%s in state=%s" % (Endpoint.EVENT_NAMES[event], Endpoint.STATE_NAMES[old_state])) return LOG.debug("Endpoint %s Old State: %s New State: %s", self._name, Endpoint.STATE_NAMES[self._state], Endpoint.STATE_NAMES[entry[0]]) self._state = entry[0] if entry[1]: entry[1](self) if (_PROTON_VERSION < (0, 8)): def _process_remote_state(self): """Call when remote endpoint state changes.""" try: event = self._remote_events.pop(0) self._process_endpoint_event(event) except IndexError: LOG.debug("Endpoint %s: ignoring unexpected remote event", self._name) def _process_local_state(self): """Call when local endpoint state changes.""" try: event = self._local_events.pop(0) self._process_endpoint_event(event) except IndexError: LOG.debug("Endpoint %s: ignoring unexpected local event", self._name) else: def _process_remote_state(self): pass def _process_local_state(self): pass @property def _endpoint_state(self): """Returns the current endpoint state.""" raise NotImplementedError("Must Override") # state entry actions - overridden by endpoint subclass: def _ep_requested(self): """Remote has activated a new endpoint.""" LOG.debug("endpoint_requested - ignored") def _ep_active(self): """Both ends of the Endpoint have become active.""" LOG.debug("endpoint_active - ignored") def _ep_need_close(self): """The remote has closed its end of the endpoint.""" LOG.debug("endpoint_need_close - ignored") def _ep_closed(self): """Both ends of the endpoint have closed.""" LOG.debug("endpoint_closed - ignored") def _ep_error(self, error): """Unanticipated/illegal state change.""" LOG.error("Endpoint state error: endpoint=%s, error=%s", self._name, error) 
pyngus-2.2.1/pyngus/sockets.py0000664003420400342040000000755613107106476017613 0ustar kgiustikgiusti00000000000000# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.
"""helper methods that provide boilerplate socket I/O and Connection
processing.
"""

__all__ = [
    "read_socket_input",
    "write_socket_output"
]

import errno
import logging
import socket

from pyngus.connection import Connection

LOG = logging.getLogger(__name__)


def read_socket_input(connection, socket_obj):
    """Read from the network layer and processes all data read.  Can
    support both blocking and non-blocking sockets.
    Returns the number of input bytes processed, or EOS if input processing
    is done.  Any exceptions raised by the socket are re-raised.
    """
    # ask the connection how much input it can accept right now
    count = connection.needs_input
    if count <= 0:
        return count  # 0 or EOS

    while True:
        try:
            sock_data = socket_obj.recv(count)
            break
        except socket.timeout as e:
            LOG.debug("Socket timeout exception %s", str(e))
            raise  # caller must handle
        except socket.error as e:
            err = e.errno
            if err in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
                # try again later
                return 0
            # otherwise, unrecoverable, caller must handle
            LOG.debug("Socket error exception %s", str(e))
            raise
        except Exception as e:  # beats me... assume fatal
            LOG.debug("unknown socket exception %s", str(e))
            raise  # caller must handle

    if len(sock_data) > 0:
        count = connection.process_input(sock_data)
    else:
        # zero-length recv() means the peer closed the socket; shut down
        # both directions of the AMQP transport
        LOG.debug("Socket closed")
        count = Connection.EOS
        connection.close_input()
        connection.close_output()
    return count


def write_socket_output(connection, socket_obj):
    """Write data to the network layer.  Can support both blocking and
    non-blocking sockets.
    Returns the number of output bytes sent, or EOS if output processing
    is done.  Any exceptions raised by the socket are re-raised.
    """
    count = connection.has_output
    if count <= 0:
        return count  # 0 or EOS

    data = connection.output_data()
    if not data:
        # error - has_output > 0, but no data?
        return Connection.EOS

    while True:
        try:
            count = socket_obj.send(data)
            break
        except socket.timeout as e:
            LOG.debug("Socket timeout exception %s", str(e))
            raise  # caller must handle
        except socket.error as e:
            err = e.errno
            if err in [errno.EAGAIN, errno.EWOULDBLOCK, errno.EINTR]:
                # try again later
                return 0
            # else assume fatal let caller handle it:
            LOG.debug("Socket error exception %s", str(e))
            raise
        except Exception as e:  # beats me... assume fatal
            LOG.debug("unknown socket exception %s", str(e))
            raise

    if count > 0:
        # tell the connection how many bytes were actually sent (may be a
        # partial write on a non-blocking socket)
        connection.output_written(count)
    elif data:
        LOG.debug("Socket closed")
        count = Connection.EOS
        connection.close_output()
        connection.close_input()
    return count
pyngus-2.2.1/pyngus/connection.py0000664003420400342040000010545713055627422020273 0ustar kgiustikgiusti00000000000000# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# License for the specific language governing permissions and limitations
# under the License.

__all__ = [
    "ConnectionEventHandler",
    "Connection"
]

import heapq
import logging
import proton
import warnings
import ssl

from pyngus.endpoint import Endpoint
from pyngus.link import _Link
from pyngus.link import _SessionProxy

LOG = logging.getLogger(__name__)

_PROTON_VERSION = (int(getattr(proton, "VERSION_MAJOR", 0)),
                   int(getattr(proton, "VERSION_MINOR", 0)))


class _CallbackLock(object):
    """A utility class for detecting when a callback invokes a non-reentrant
    Pyngus method.
    """
    def __init__(self):
        super(_CallbackLock, self).__init__()
        # count of nested callback invocations currently in progress
        self.in_callback = 0

    def __enter__(self):
        self.in_callback += 1
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.in_callback -= 1
        # if a call is made to a non-reentrant method while this context is
        # held, then the method will raise a RuntimeError().  Return false to
        # propagate the exception to the caller
        return False


class ConnectionEventHandler(object):
    """Callback interface for Connection events.  Applications subclass this
    and override the methods for the events they care about; the defaults
    simply log and ignore the event.
    """
    def connection_active(self, connection):
        """Connection handshake has completed."""
        LOG.debug("connection_active (ignored)")

    def connection_failed(self, connection, error):
        """Connection's transport has failed in some way."""
        LOG.warn("connection_failed, error=%s (ignored)", str(error))

    def connection_remote_closed(self, connection, pn_condition):
        """Peer has closed its end of the connection."""
        LOG.debug("connection_remote_closed (ignored)")

    def connection_closed(self, connection):
        """The connection has cleanly closed."""
        LOG.debug("connection_closed (ignored)")

    def sender_requested(self, connection, link_handle,
                         name, requested_source,
                         properties):
        """Peer has requested a SenderLink be created."""
        # call accept_sender to accept new link,
        # reject_sender to reject it.
        LOG.debug("sender_requested (ignored)")

    def receiver_requested(self, connection, link_handle,
                           name, requested_target,
                           properties):
        """Peer has requested a ReceiverLink be created."""
        # call accept_receiver to accept new link,
        # reject_receiver to reject it.
        LOG.debug("receiver_requested (ignored)")

    # No longer supported by proton >= 0.10, so this method is deprecated
    def sasl_step(self, connection, pn_sasl):
        """DEPRECATED"""
        LOG.debug("sasl_step (ignored)")

    def sasl_done(self, connection, pn_sasl, result):
        """SASL exchange complete."""
        LOG.debug("sasl_done (ignored)")


class Connection(Endpoint):
    """A Connection to a peer."""
    EOS = -1   # indicates 'I/O stream closed'

    # set of all SASL connection configuration properties
    _SASL_PROPS = set(['x-username', 'x-password', 'x-require-auth',
                       'x-sasl-mechs', 'x-sasl-config-dir',
                       'x-sasl-config-name', 'x-force-sasl'])
    # set of all SSL connection configuration properties
    _SSL_PROPS = set(['x-ssl', 'x-ssl-identity', 'x-ssl-ca-file',
                      'x-ssl-verify-mode', 'x-ssl-server', 'x-ssl-peer-name',
                      'x-ssl-allow-cleartext'])

    # SSL peer certificate verification
    _VERIFY_MODES = {'verify-peer': proton.SSLDomain.VERIFY_PEER_NAME,
                     'verify-cert': proton.SSLDomain.VERIFY_PEER,
                     'no-verify': proton.SSLDomain.ANONYMOUS_PEER}

    def _not_reentrant(func):
        """Decorator that prevents callbacks from calling into methods that
        are not reentrant
        """
        def wrap(self, *args, **kws):
            # _callback_lock is None after destroy(); only guard live objects
            if self._callback_lock and self._callback_lock.in_callback:
                m = "Connection %s cannot be invoked from a callback!" % func
                raise RuntimeError(m)
            return func(self, *args, **kws)
        return wrap

    def __init__(self, container, name, event_handler=None, properties=None):
        """Create a new connection from the Container

        properties: map, properties of the new connection. The following keys
        and values are supported:

        idle-time-out: float, time in seconds before an idle link will be
        closed.

        hostname: string, the name of the host to which this connection is
        being made, sent in the Open frame.

        max-frame-size: int, maximum acceptable frame size in bytes.

        properties: map, proton connection properties sent to the peer.

        The following custom connection properties are supported:

        x-server: boolean, set this to True to configure the connection as a
        server side connection.  This should be set True if the connection was
        remotely initiated (e.g. accept on a listening socket).  If the
        connection was locally initiated (e.g. by calling connect()), then
        this value should be set to False.  This setting is used by
        authentication and encryption to configure the connection's role.  The
        default value is False for client mode.

        x-username: string, the client's username to use when authenticating
        with a server.

        x-password: string, the client's password, used for authentication.

        x-require-auth: boolean, reject remotely-initiated client connections
        that fail to provide valid credentials for authentication.

        x-sasl-mechs: string, a space-separated list of mechanisms
        that are allowed for authentication.  Defaults to "ANONYMOUS"

        x-sasl-config-dir: string, path to the directory containing the Cyrus
        SASL server configuration.

        x-sasl-config-name: string, name of the Cyrus SASL configuration file
        contained in the x-sasl-config-dir (without the '.conf' suffix)

        x-force-sasl: by default SASL authentication is disabled.  SASL will be
        enabled if any of the above x-sasl-* options are set.  For clients
        using GSSAPI it is likely none of these options will be set.  In order
        for these clients to authenticate this flag must be set true.  The
        value of this property is ignored if any of the other SASL related
        properties are set.

        x-ssl: boolean, Allows clients to connect using SSL setting a minimum
        viable configuration (using the system's CA bundle to validate the
        peer's certificate).  This setting is overwritten if subsequent SSL
        settings are found.

        x-ssl-identity: tuple, contains identifying certificate information
        which will be presented to the peer.  The first item in the tuple is
        the path to the certificate file (PEM format).  The second item is the
        path to a file containing the private key used to sign the certificate
        (PEM format, optional if private key is stored in the certificate
        itself).  The last item is the password used to encrypt the private
        key (string, not required if private key is not encrypted)

        x-ssl-ca-file: string, path to a file containing the certificates of
        the trusted Certificate Authorities that will be used to check the
        signature of the peer's certificate.  Not used if x-ssl-verify-mode is
        set to 'no-verify'.  To use the system's default CAs instead leave
        this option out and set x-ssl to True.

        x-ssl-verify-mode: string, configure the level of security provided by
        SSL.  Possible values:
            "verify-peer" (default) - most secure, requires peer to supply a
            certificate signed by a valid CA (see x-ssl-ca-file), and check
            the CN or SAN entry in the certificate against the expected
            peer hostname (see hostname and x-ssl-peer-name properties)
            "verify-cert" (default if no x-ssl-peer-name given) - like
            verify-peer, but skips the check of the peer hostname.  Vulnerable
            to man-in-the-middle attack.
            "no-verify" - do not require the peer to provide a certificate.
            Results in a weaker encryption stream, and other vulnerabilities.

        x-ssl-peer-name: string, DNS name of peer.  Override the hostname used
        to authenticate peer's certificate (see x-ssl-verify-mode).  The value
        of the 'hostname' property is used if this property is not supplied.

        x-ssl-allow-cleartext: boolean, Allows clients to connect without
        using SSL (eg, plain TCP).  Used by a server that will accept clients
        requesting either trusted or untrusted connections.

        x-trace-protocol: boolean, if true, dump sent and received frames to
        stdout.
        """
        super(Connection, self).__init__(name)
        self._transport_bound = False
        self._container = container
        self._handler = event_handler
        self._properties = properties or {}
        # 'x-ssl-server' is the legacy spelling of 'x-server'; honor it as
        # the fallback default
        old_flag = self._properties.get('x-ssl-server', False)
        self._server = self._properties.get('x-server', old_flag)

        self._pn_connection = proton.Connection()
        self._pn_connection.container = container.name
        if (_PROTON_VERSION < (0, 9)):
            self._pn_transport = proton.Transport()
        else:
            # 0.9+ transports must be told their role at construction time
            if self._server:
                mode = proton.Transport.SERVER
            else:
                mode = proton.Transport.CLIENT
            self._pn_transport = proton.Transport(mode)
        self._pn_collector = proton.Collector()
        self._pn_connection.collect(self._pn_collector)

        if 'hostname' in self._properties:
            self._pn_connection.hostname = self._properties['hostname']
        secs = self._properties.get("idle-time-out")
        if secs:
            self._pn_transport.idle_timeout = secs
        max_frame = self._properties.get("max-frame-size")
        if max_frame:
            self._pn_transport.max_frame_size = max_frame
        if 'properties' in self._properties:
            self._pn_connection.properties = self._properties["properties"]
        if self._properties.get("x-trace-protocol"):
            self._pn_transport.trace(proton.Transport.TRACE_FRM)

        # indexed by link-name
        self._sender_links = {}    # SenderLink
        self._receiver_links = {}  # ReceiverLink

        self._timers = {}  # indexed by expiration date
        self._timers_heap = []  # sorted by expiration date

        self._read_done = False
        self._write_done = False
        self._error = None
        self._next_deadline = 0
        self._user_context = None
        self._remote_session_id = 0

        self._callback_lock = _CallbackLock()

        self._pn_sasl = None
        self._sasl_done = False

        # if x-force-sasl is false remove it so it does not trigger the SASL
        # configuration logic below
        if not self._properties.get('x-force-sasl', True):
            del self._properties['x-force-sasl']

        if self._SASL_PROPS.intersection(set(self._properties.keys())):
            # SASL config specified, need to enable SASL
            if (_PROTON_VERSION < (0, 10)):
                # best effort map of 0.10 sasl config to pre-0.10 sasl
                if self._server:
self.pn_sasl.server() if 'x-require-auth' in self._properties: if not self._properties['x-require-auth']: if _PROTON_VERSION >= (0, 8): self.pn_sasl.allow_skip() else: if 'x-username' in self._properties: self.pn_sasl.plain(self._properties['x-username'], self._properties.get('x-password', '')) else: self.pn_sasl.client() mechs = self._properties.get('x-sasl-mechs') if mechs: self.pn_sasl.mechanisms(mechs) else: # new Proton SASL configuration: # maintain old behavior: allow PLAIN and ANONYMOUS # authentication. Override this using x-sasl-mechs below: self.pn_sasl.allow_insecure_mechs = True if 'x-require-auth' in self._properties: ra = self._properties['x-require-auth'] self._pn_transport.require_auth(ra) if 'x-username' in self._properties: self._pn_connection.user = self._properties['x-username'] if 'x-password' in self._properties: self._pn_connection.password = \ self._properties['x-password'] if 'x-sasl-mechs' in self._properties: mechs = self._properties['x-sasl-mechs'].upper() self.pn_sasl.allowed_mechs(mechs) if 'PLAIN' not in mechs and 'ANONYMOUS' not in mechs: self.pn_sasl.allow_insecure_mechs = False if 'x-sasl-config-dir' in self._properties: self.pn_sasl.config_path( self._properties['x-sasl-config-dir']) if 'x-sasl-config-name' in self._properties: self.pn_sasl.config_name( self._properties['x-sasl-config-name']) # intercept any SSL failures and cleanup resources before propagating # the exception: try: self._pn_ssl = self._configure_ssl(properties) except: self.destroy() raise @property def container(self): return self._container @property # TODO(kgiusti) - hopefully remove def pn_transport(self): return self._pn_transport @property # TODO(kgiusti) - hopefully remove def pn_connection(self): return self._pn_connection @property def name(self): return self._name @property def remote_container(self): """Return the name of the remote container. Should be present once the connection is active. 
""" return self._pn_connection.remote_container @property def remote_hostname(self): """Return the hostname advertised by the remote, if present.""" if self._pn_connection: return self._pn_connection.remote_hostname return None @property def remote_properties(self): """Properties provided by the peer.""" if self._pn_connection: return self._pn_connection.remote_properties return None @property def pn_sasl(self): if not self._pn_sasl: self._pn_sasl = self._pn_transport.sasl() return self._pn_sasl def pn_ssl(self): """Return the Proton SSL context for this Connection.""" return self._pn_ssl def _get_user_context(self): return self._user_context def _set_user_context(self, ctxt): self._user_context = ctxt _uc_docstr = """Associate an arbitrary user object with this Connection.""" user_context = property(_get_user_context, _set_user_context, doc=_uc_docstr) def open(self): if not self._transport_bound: self._pn_transport.bind(self._pn_connection) self._transport_bound = True if self._pn_connection.state & proton.Endpoint.LOCAL_UNINIT: self._pn_connection.open() def close(self, pn_condition=None): for link in list(self._sender_links.values()): link.close(pn_condition) for link in list(self._receiver_links.values()): link.close(pn_condition) if pn_condition: self._pn_connection.condition = pn_condition if self._pn_connection.state & proton.Endpoint.LOCAL_ACTIVE: self._pn_connection.close() @property def active(self): """Return True if both ends of the Connection are open.""" return self._endpoint_state == self._ACTIVE @property def closed(self): """Return True if the Connection has finished closing.""" return (self._write_done and self._read_done) @_not_reentrant def destroy(self): # if a connection is destroyed without flushing pending output, # the remote will see an unclean shutdown (framing error) if self.has_output > 0: LOG.debug("Connection with buffered output destroyed") self._error = "Destroyed by the application" self._handler = None self._properties = None tmp 
= self._sender_links.copy() for l in tmp.values(): l.destroy() assert(len(self._sender_links) == 0) tmp = self._receiver_links.copy() for l in tmp.values(): l.destroy() assert(len(self._receiver_links) == 0) self._timers.clear() self._timers_heap = None self._container.remove_connection(self._name) self._container = None self._user_context = None self._callback_lock = None if self._transport_bound: self._pn_transport.unbind() self._pn_transport = None self._pn_connection.free() self._pn_connection = None if _PROTON_VERSION < (0, 8): # memory leak: drain the collector before releasing it while self._pn_collector.peek(): self._pn_collector.pop() self._pn_collector = None self._pn_sasl = None self._pn_ssl = None _CLOSED = (proton.Endpoint.LOCAL_CLOSED | proton.Endpoint.REMOTE_CLOSED) _ACTIVE = (proton.Endpoint.LOCAL_ACTIVE | proton.Endpoint.REMOTE_ACTIVE) @_not_reentrant def process(self, now): """Perform connection state processing.""" if self._pn_connection is None: LOG.error("Connection.process() called on destroyed connection!") return 0 # do nothing until the connection has been opened if self._pn_connection.state & proton.Endpoint.LOCAL_UNINIT: return 0 if self._pn_sasl and not self._sasl_done: # wait until SASL has authenticated if (_PROTON_VERSION < (0, 10)): if self._pn_sasl.state not in (proton.SASL.STATE_PASS, proton.SASL.STATE_FAIL): LOG.debug("SASL in progress. 
State=%s", str(self._pn_sasl.state)) if self._handler: with self._callback_lock: self._handler.sasl_step(self, self._pn_sasl) return self._next_deadline self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) else: if self._pn_sasl.outcome is not None: self._sasl_done = True if self._handler: with self._callback_lock: self._handler.sasl_done(self, self._pn_sasl, self._pn_sasl.outcome) # process timer events: timer_deadline = self._expire_timers(now) transport_deadline = self._pn_transport.tick(now) if timer_deadline and transport_deadline: self._next_deadline = min(timer_deadline, transport_deadline) else: self._next_deadline = timer_deadline or transport_deadline # process events from proton: pn_event = self._pn_collector.peek() while pn_event: # LOG.debug("pn_event: %s received", pn_event.type) if _Link._handle_proton_event(pn_event, self): pass elif self._handle_proton_event(pn_event): pass elif _SessionProxy._handle_proton_event(pn_event, self): pass self._pn_collector.pop() pn_event = self._pn_collector.peek() # check for connection failure after processing all pending # engine events: if self._error: if self._handler: # nag application until connection is destroyed self._next_deadline = now with self._callback_lock: self._handler.connection_failed(self, self._error) elif (self._endpoint_state == self._CLOSED and self._read_done and self._write_done): # invoke closed callback after endpoint has fully closed and # all pending I/O has completed: if self._handler: with self._callback_lock: self._handler.connection_closed(self) return self._next_deadline @property def next_tick(self): text = "next_tick deprecated, use deadline instead" warnings.warn(DeprecationWarning(text)) return self.deadline @property def deadline(self): """Must invoke process() on or before this timestamp.""" return self._next_deadline @property def needs_input(self): if self._read_done: LOG.debug("needs_input 
EOS") return self.EOS try: capacity = self._pn_transport.capacity() except Exception as e: self._read_done = True self._connection_failed(str(e)) return self.EOS if capacity >= 0: return capacity LOG.debug("needs_input read done") self._read_done = True return self.EOS def process_input(self, in_data): c = min(self.needs_input, len(in_data)) if c <= 0: return c try: rc = self._pn_transport.push(in_data[:c]) except Exception as e: self._read_done = True self._connection_failed(str(e)) return self.EOS if rc: # error? LOG.debug("process_input read done") self._read_done = True return self.EOS # hack: check if this was the last input needed by the connection. # If so, this will set the _read_done flag and the 'connection closed' # callback can be issued on the next call to process() self.needs_input return c def close_input(self, reason=None): if not self._read_done: try: self._pn_transport.close_tail() except Exception as e: self._connection_failed(str(e)) LOG.debug("close_input read done") self._read_done = True @property def has_output(self): if self._write_done: LOG.debug("has output EOS") return self.EOS try: pending = self._pn_transport.pending() except Exception as e: self._write_done = True self._connection_failed(str(e)) return self.EOS if pending >= 0: return pending LOG.debug("has output write_done") self._write_done = True return self.EOS def output_data(self): """Get a buffer of data that needs to be written to the network. """ c = self.has_output if c <= 0: return None try: buf = self._pn_transport.peek(c) except Exception as e: self._connection_failed(str(e)) return None return buf def output_written(self, count): try: self._pn_transport.pop(count) except Exception as e: self._write_done = True self._connection_failed(str(e)) # hack: check if this was the last output from the connection. 
If so, # this will set the _write_done flag and the 'connection closed' # callback can be issued on the next call to process() self.has_output def close_output(self, reason=None): if not self._write_done: try: self._pn_transport.close_head() except Exception as e: self._connection_failed(str(e)) LOG.debug("close output write done") self._write_done = True def create_sender(self, source_address, target_address=None, event_handler=None, name=None, properties=None): """Factory method for Sender links.""" ident = name or str(source_address) if ident in self._sender_links: raise KeyError("Sender %s already exists!" % ident) session = _SessionProxy("session-%s" % ident, self) session.open() sl = session.new_sender(ident) sl.configure(target_address, source_address, event_handler, properties) self._sender_links[ident] = sl return sl def accept_sender(self, link_handle, source_override=None, event_handler=None, properties=None): link = self._sender_links.get(link_handle) if not link: raise Exception("Invalid link_handle: %s" % link_handle) pn_link = link._pn_link if pn_link.remote_source.dynamic and not source_override: raise Exception("A source address must be supplied!") source_addr = source_override or pn_link.remote_source.address link.configure(pn_link.remote_target.address, source_addr, event_handler, properties) return link def reject_sender(self, link_handle, pn_condition=None): """Rejects the SenderLink, and destroys the handle.""" link = self._sender_links.get(link_handle) if not link: raise Exception("Invalid link_handle: %s" % link_handle) link.reject(pn_condition) # note: normally, link.destroy() cannot be called from a callback, # but this link was never made available to the application so this # link is only referenced by the connection link.destroy() def create_receiver(self, target_address, source_address=None, event_handler=None, name=None, properties=None): """Factory method for creating Receive links.""" ident = name or str(target_address) if ident in 
self._receiver_links: raise KeyError("Receiver %s already exists!" % ident) session = _SessionProxy("session-%s" % ident, self) session.open() rl = session.new_receiver(ident) rl.configure(target_address, source_address, event_handler, properties) self._receiver_links[ident] = rl return rl def accept_receiver(self, link_handle, target_override=None, event_handler=None, properties=None): link = self._receiver_links.get(link_handle) if not link: raise Exception("Invalid link_handle: %s" % link_handle) pn_link = link._pn_link if pn_link.remote_target.dynamic and not target_override: raise Exception("A target address must be supplied!") target_addr = target_override or pn_link.remote_target.address link.configure(target_addr, pn_link.remote_source.address, event_handler, properties) return link def reject_receiver(self, link_handle, pn_condition=None): link = self._receiver_links.get(link_handle) if not link: raise Exception("Invalid link_handle: %s" % link_handle) link.reject(pn_condition) # note: normally, link.destroy() cannot be called from a callback, # but this link was never made available to the application so this # link is only referenced by the connection link.destroy() @property def _endpoint_state(self): return self._pn_connection.state def _remove_sender(self, name): if name in self._sender_links: del self._sender_links[name] def _remove_receiver(self, name): if name in self._receiver_links: del self._receiver_links[name] def _connection_failed(self, error="Error not specified!"): """Clean up after connection failure detected.""" if not self._error: LOG.error("Connection failed: %s", str(error)) self._error = error def _configure_ssl(self, properties): if (not properties or not self._SSL_PROPS.intersection(set(iter(properties)))): return None mode = proton.SSLDomain.MODE_CLIENT if properties.get('x-ssl-server', properties.get('x-server')): mode = proton.SSLDomain.MODE_SERVER identity = properties.get('x-ssl-identity') ca_file = 
properties.get('x-ssl-ca-file') if (not ca_file and properties.get('x-ssl') and hasattr(ssl, 'get_default_verify_paths')): ca_file = ssl.get_default_verify_paths().cafile hostname = properties.get('x-ssl-peer-name', properties.get('hostname')) # default to most secure level of certificate validation if not ca_file: vdefault = 'no-verify' elif not hostname: vdefault = 'verify-cert' else: vdefault = 'verify-peer' vmode = properties.get('x-ssl-verify-mode', vdefault) try: vmode = self._VERIFY_MODES[vmode] except KeyError: raise proton.SSLException("bad value for x-ssl-verify-mode: '%s'" % vmode) if vmode == proton.SSLDomain.VERIFY_PEER_NAME: if not hostname or not ca_file: raise proton.SSLException("verify-peer needs x-ssl-peer-name" " and x-ssl-ca-file") elif vmode == proton.SSLDomain.VERIFY_PEER: if not ca_file: raise proton.SSLException("verify-cert needs x-ssl-ca-file") # This will throw proton.SSLUnavailable if SSL support is not installed domain = proton.SSLDomain(mode) if identity: # our identity: domain.set_credentials(identity[0], identity[1], identity[2]) if ca_file: # how we verify peers: domain.set_trusted_ca_db(ca_file) domain.set_peer_authentication(vmode, ca_file) if mode == proton.SSLDomain.MODE_SERVER: if properties.get('x-ssl-allow-cleartext'): domain.allow_unsecured_client() pn_ssl = proton.SSL(self._pn_transport, domain) if hostname: pn_ssl.peer_hostname = hostname LOG.debug("SSL configured for connection %s", self._name) return pn_ssl def _add_timer(self, deadline, callback): callbacks = self._timers.get(deadline) if callbacks is None: callbacks = set() self._timers[deadline] = callbacks heapq.heappush(self._timers_heap, deadline) if deadline < self._next_deadline: self._next_deadline = deadline callbacks.add(callback) def _cancel_timer(self, deadline, callback): callbacks = self._timers.get(deadline) if callbacks: callbacks.discard(callback) # next expire will discard empty deadlines def _expire_timers(self, now): while (self._timers_heap and 
self._timers_heap[0] <= now):
            deadline = heapq.heappop(self._timers_heap)
            callbacks = self._timers.get(deadline)
            while callbacks:
                # pop each expired callback from the set and invoke it
                callbacks.pop()()
            del self._timers[deadline]
        return self._timers_heap[0] if self._timers_heap else 0

    # Proton's event model was changed after 0.7
    if (_PROTON_VERSION >= (0, 8)):
        # map proton endpoint events to the Endpoint state machine inputs:
        _endpoint_event_map = {
            proton.Event.CONNECTION_REMOTE_OPEN: Endpoint.REMOTE_OPENED,
            proton.Event.CONNECTION_REMOTE_CLOSE: Endpoint.REMOTE_CLOSED,
            proton.Event.CONNECTION_LOCAL_OPEN: Endpoint.LOCAL_OPENED,
            proton.Event.CONNECTION_LOCAL_CLOSE: Endpoint.LOCAL_CLOSED}

        def _handle_proton_event(self, pn_event):
            # dispatch a single proton event (0.8+ event model);
            # returns True if the event was recognized and handled
            ep_event = Connection._endpoint_event_map.get(pn_event.type)
            if ep_event is not None:
                self._process_endpoint_event(ep_event)
            elif pn_event.type == proton.Event.CONNECTION_INIT:
                LOG.debug("Connection created: %s", pn_event.context)
            elif pn_event.type == proton.Event.CONNECTION_FINAL:
                LOG.debug("Connection finalized: %s", pn_event.context)
            elif pn_event.type == proton.Event.TRANSPORT_ERROR:
                self._connection_failed(str(self._pn_transport.condition))
            else:
                return False  # unknown
            return True  # handled
    elif hasattr(proton.Event, "CONNECTION_LOCAL_STATE"):
        # 0.7 proton event model
        def _handle_proton_event(self, pn_event):
            # dispatch a single proton event (0.7 event model);
            # returns True if the event was recognized and handled
            if pn_event.type == proton.Event.CONNECTION_LOCAL_STATE:
                self._process_local_state()
            elif pn_event.type == proton.Event.CONNECTION_REMOTE_STATE:
                self._process_remote_state()
            else:
                return False  # unknown
            return True  # handled
    else:
        raise Exception("The installed version of Proton is not supported.")

    # endpoint state machine actions:

    def _ep_active(self):
        """Both ends of the Endpoint have become active."""
        LOG.debug("Connection is up")
        if self._handler:
            # guard against non-reentrant calls from the callback:
            with self._callback_lock:
                self._handler.connection_active(self)

    def _ep_need_close(self):
        """The remote has closed its end of the endpoint."""
        LOG.debug("Connection remotely closed")
        if self._handler:
            cond = self._pn_connection.remote_condition
            with self._callback_lock:
self._handler.connection_remote_closed(self, cond)

    def _ep_error(self, error):
        """The endpoint state machine failed due to protocol error."""
        super(Connection, self)._ep_error(error)
        self._connection_failed("Protocol error occurred.")

    # order by name
    def __lt__(self, other):
        return self.name < other.name

    def __le__(self, other):
        return self < other or self.name == other.name

    def __gt__(self, other):
        return self.name > other.name

    def __ge__(self, other):
        return self > other or self.name == other.name
pyngus-2.2.1/pyngus/__init__.py0000664003420400342040000000216513107111610017654 0ustar  kgiustikgiusti00000000000000#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# Public API re-exported by the pyngus package:
from pyngus.container import Container
from pyngus.connection import Connection, ConnectionEventHandler
from pyngus.link import ReceiverLink, ReceiverEventHandler
from pyngus.link import SenderLink, SenderEventHandler
from pyngus.sockets import read_socket_input
from pyngus.sockets import write_socket_output

VERSION = (2, 2, 1)  # major, minor, fix
pyngus-2.2.1/pyngus/container.py0000664003420400342040000000552213046104577020117 0ustar  kgiustikgiusti00000000000000#    Licensed to the Apache Software Foundation (ASF) under one
#    or more contributor license agreements.  
See the NOTICE file
#    distributed with this work for additional information
#    regarding copyright ownership.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License.  You may obtain
#    a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
#    License for the specific language governing permissions and limitations
#    under the License.

__all__ = [
    "Container"
]

import heapq
import logging

from pyngus.connection import Connection

LOG = logging.getLogger(__name__)


class Container(object):
    """An implementation of an AMQP 1.0 container."""
    def __init__(self, name, properties=None):
        # name: identifies this container; must be unique across the
        # entire messaging domain
        self._name = name
        # map of connection name -> Connection
        self._connections = {}
        self._properties = properties

    def destroy(self):
        """Release the container and destroy all of its connections."""
        # copy values: conn.destroy() removes itself from _connections
        conns = list(self._connections.values())
        for conn in conns:
            conn.destroy()

    @property
    def name(self):
        # the container's unique identifier (str)
        return self._name

    def create_connection(self, name, event_handler=None, properties=None):
        """Factory for Connection objects.

        'name' must be unique among all Connections in this container;
        raises KeyError if it is not.
        """
        if name in self._connections:
            raise KeyError("connection '%s' already exists" % str(name))
        conn = Connection(self, name, event_handler, properties)
        if conn:
            self._connections[name] = conn
        return conn

    def need_processing(self):
        """A utility to help determine which connections need processing.

        Returns a triple of lists containing those connections that

        0) need to read from the network,
        1) need to write to the network,
        2) waiting for pending timers to expire.

        The timer list is sorted with the connection next expiring at
        index 0.
        
""" readers = [] writers = [] timer_heap = [] for c in iter(self._connections.values()): if c.needs_input > 0: readers.append(c) if c.has_output > 0: writers.append(c) if c.deadline: heapq.heappush(timer_heap, (c.next_tick, c)) timers = [] while timer_heap: x = heapq.heappop(timer_heap) timers.append(x[1]) return (readers, writers, timers) def resolve_sender(self, target_address): pass def resolve_receiver(self, source_address): pass def get_connection(self, name): return self._connections.get(name, None) def remove_connection(self, name): if name in self._connections: del self._connections[name] pyngus-2.2.1/README.md0000664003420400342040000000762113107110334015501 0ustar kgiustikgiusti00000000000000# Pyngus # [![Build Status](https://travis-ci.org/kgiusti/pyngus.svg)](https://travis-ci.org/kgiusti/pyngus) A messaging framework built on the QPID Proton engine. It provides a callback-based API for message passing. See the User Guide in the docs directory for more detail. ## Release 2.2.1 ## * disable the socket I/O logging - fills the debug logs with lots of useless crap. ## Release 2.2.0 ## * Can now use the system's default CA by specifying the 'x-ssl' option in the 'properties' field of the create_connection call and _NOT_ specifying the 'x-ssl-ca-file' property. (contributed by Juan Antonio Osorio Robles) * use the most secure default setting for x-ssl-verify-mode based on the configuration * bump max proton version to 0.17 ## Release 2.1.4 ## * avoid using deprecated next_tick in the container * enable Python 3.5 testing in tox * add client authentication via SSL tests * bump max proton version to 0.16 ## Release 2.1.3 ## * Remove chatty debug log messages * fix pep8 violation * add static performace test tool * Bump max proton version to 0.15 ## Release 2.1.2 ## * Bump max proton version to 0.14 ## Release 2.1.1 ## * bugfix: under some (rare) flow/credit interactions a sender may stall. Changed code to invoke credit_granted() callback more frequently. 
## Release 2.1.0 ## * feature: add 'x-force-sasl' to connection property map * bugfix: update old SASL unit test ## Release 2.0.4 ## * Bump max proton version to 0.13 * performance tweak to link event handling * fix perf-test.py tool * bugfix: fix leak of timer callbacks * enable Python 3.4 testing * bugfix: fix receiver example (recv.py) * several fixes to the SASL unit tests * bugfix: fix leak of underlying proton objects * Add SASL/SSL configuration options to examples * bugfix: allow PLAIN or ANONYMOUS authentication in server mode ## Release 2.0.3 ## * bugfix: fixed a memory leak * bugfix: cyrus test fixed ## Release 2.0.0 ## * Support for proton 0.10 * The SASL API has changed due to an API change in proton 0.10 * Proton 0.10 implements SASL via the Cyrus SASL library * this change allows use of more secure authentication mechanisms, such as Kerberos * Applications should no longer directly access the Proton SASL class via the Connection.pn\_sasl property * instead, the following new properties may be passed to the Container.create\_connection() method: * x-username - (client only) the authentication id * x-password - (client only) the authentication password * x-require-auth - (server only) reject clients that do not use authentication * x-sasl-mechs - (server only) whitespace delimited string of acceptable mechanisms. If not supplied the mechanisms specified in the system's SASL configuration will be used. This option should only be used when the application wants to further restrict the set of acceptable mechanisms. * x-sasl-config-dir - (server only) the location of the _directory_ that holds the system's Cyrus SASL configuration. * x-sasl-config-name - (server only) the name of the SASL configuration file (*without* the ".conf" suffix) in the x-sasl-dir directory. * *NOTE WELL*: Cyrus SASL cannot support multiple different SASL configurations per connection. 
The values of x-sasl-config-dir and x-sasl-config-name *MUST* be the same for all connections that use SASL. * the ConnectionEventHandler.sasl\_step() callback has been deprecated as proton 0.10 no longer uses it * The ConnectionEventHandler.sasl\_done() callback *is* still supported. * Pyngus now enforces strict reentrancy checking. Attempting to call a non-reentrant Pyngus method will now throw a RuntimeError exception. ## Release 1.3.0 ## * Support for proton 0.9 * Installation of proton dependencies via setup.py. This feature was added by Flavio Percoco Premoli - thanks Flavio! pyngus-2.2.1/MANIFEST.in0000664003420400342040000000014713024050133015752 0ustar kgiustikgiusti00000000000000include README.md include LICENSE include docs/User-Guide.md exclude .gitignore global-exclude *.pyc pyngus-2.2.1/docs/0000775003420400342040000000000013107111745015153 5ustar kgiustikgiusti00000000000000pyngus-2.2.1/docs/User-Guide.md0000664003420400342040000010614212561257442017462 0ustar kgiustikgiusti00000000000000# Pyngus # A callback-based messaging framework built around the QPID Proton engine. # Purpose # This framework is meant to ease the integration of AMQP 1.0 messaging into applications that use a callback-based logic flow. It provides a very basic, connection-oriented messaging model that should meet the needs of most applications. The framework has been designed with the following goals in mind: * provide a callback-based messaging API * simplify the user model exported by the Proton engine - you should not have to be an expert in AMQP to use this framework! * give the application control of the I/O implementation where possible * limit the functionality provided by Proton to a subset that should be adequate for 79% of all messaging use-cases [1] All actions are designed to be non-blocking, leveraging callbacks where asynchronous behavior is modeled. There is no threading architecture assumed or locking performed by this framework. 
Locking is assumed to be handled outside of this framework by the application - all processing provided by this framework is assumed to be single-threaded [2]. [1] Unlike the Proton Engine, this framework does not intend to support all functionality and features defined by the AMQP 1.0 protocol. It is only intended to provide basic message passing functionality. [2] Not entirely true - it may be possible to multithread as long as connections are not shared across threads. TBD ## What this framework doesn't do ## * __Message management__ - All messages are assumed to be Proton Messages. Creating and parsing Messages is left to the application. * __Routing__ - This framework basically ignores the "to" or "reply-to" contained in the message. It leaves these fields under the control and interpretation of the application. This means that the application determines the proper Link over which to send an outgoing message. In addition, it assumes the application can dispatch messages arriving on a link to the proper handler [3]. * __Connection management__ - It is expected that your application will manage the creation and configuration of sockets. Whether those sockets are created by initiating a connection or accepting an inbound connection is irrelevant to this framework. It is also assumed that, if desired, your application will be responsible for monitoring the sockets for I/O activity (e.g. call poll()). The framework will support both blocking and non-blocking sockets, however it may block when doing I/O over a blocking socket. Note well: reconnect and failover must also be handled by the application [3]. * __Flow control__ - It is assumed the application will control the number of messages that can be accepted by a receiving link (capacity). Sent messages will be queued locally until credit is made available for the message(s) to be transmitted. The framework's API allows the application to monitor the amount of credit available on an outgoing link [3]. 
[3] All these features are provided by the QPID Messenger API. The Messenger API may be a better match for your application if any of these features are required. See the Messenger section of the [Apache QPID website](http://qpid.apache.org/components/messenger/index.html "Messenger") for more detail. # Theory of Operations # This framework defines the following set of objects: * __Container__ - an implementation of the container concept defined by AMQP 1.0. This object is a factory for Connections. * __Connection__ - an implementation of the connection concept defined by AMQP 1.0. You can think of this as a data pipe between two Containers. This object is a factory for links. * __Links__ - A uni-directional pipe for messages traveling between resources (nodes) within a container. A link exists within a Connection, and uses the Connection as its data path to the remote. There are two sub-classes of Links: *SenderLinks* and *ReceiverLinks*. SenderLinks produce messages from a particular node. ReceiverLinks consume messages on behalf of a local node. An application creates one or more Containers, which represents a domain for a set of message-oriented **nodes**. Nodes are those components within an application that are the source or sink of a message flow. Example nodes would include message queues, message topics, a database, an event logger, etc. A node is identified by its name, which must uniquely identify the node within its container. To pass messages between nodes, the application must first set up a Connection between the two Containers that hold the nodes. To do this, the application creates a network connection to the system that holds the remote Container. How this network connection is created is determined by the application's design and purpose. For example, the application may proactively initiate these network connections (eg. call connect()), or passively listen for connection requests coming from remote systems (eg. listen()/accept()). 
The method used by the application to determine which systems it should
connect to in order to access particular resources (eg. nodes) is left to the
application designers.

The application must then create a Connection object to manage the network
connection it has set up.  A Connection object is allocated from the
Container, and represents the data pipe between the local and remote
Containers.  The Connection consumes data arriving from, and produces data
for, its network connection.  It is the responsibility of the application to
transfer network data between the Connection object and its corresponding
network connection.

To transfer messages between the connected Container's nodes, a *link* must
be created.  A link is uni-directional; from the application's perspective,
it either sends messages to a remote node, or consumes messages from a remote
node.  The framework provides two distinct link classes - one for sending
messages to a node, the other for receiving messages from a node.

To send messages to a node on the remote Container, the application allocates
a *SenderLink* from the Connection that attaches to that remote Container.
The application assigns a local name to the SenderLink which identifies the
node that is the source of the messages sent by it.  This is the *Source node
address*, and is made available to the remote so it may classify the origin
of the message stream.

The application may also supply the address of the node to which it is
sending.  This is the *Target node address*.  The Target address supplied by
the application is merely a hint for the peer - the peer may override the
supplied address and provide the actual Target address in use by the peer.
If no Target address is given the remote may allocate one on behalf of the
sending application.  The SenderLink's final Target address is made available
to the sending application once the link has completed setup.
When sending a message, an application can choose whether or not it needs to know about the arrival status of the message at the remote node. The application may send the message as *best effort* if it doesn't care about the arrival status. This *send-and-forget* service provides no feedback on the delivery of the message. Otherwise the application may register a callback that is invoked when the delivery status of the message is determined by the framework. All messages are sent using *at-most-once* semantics: this framework does not attempt to re-send messages on behalf of the application. If reliable messaging is required the application must implement its own retry logic. If the application needs to consume messages from a node on the remote Container, it allocates a *ReceiverLink* from the Connection that attaches to that remote Container. The application assigns a local name to the ReceiverLink that identifies the local node that is the consumer of all the messages that arrive on the link. This is the *Target node address*, and is made available to the remote so it may identify the destination of the message stream. The application may also supply the address of the remote node from which it is consuming. This is the *Source node address*. The Source address supplied by the receiver is merely a hint for the remote application - the remote may override this address and provide the actual Source address used by the remote. If no Source address is given the remote may allocate one on behalf of the receiving application. The ReceiverLink's final Source address is made available to the receiving application once the link has completed setup. ## Callback Events ## This framework uses a callback model to notify the application when messaging-related events have occurred. Each of the object types provided by the framework define a set of events that may be of interest to the application. 
To receive these events, the application must register callback handlers with each object that it manages. See the API section for details regarding each class's event handlers. ---------- # API # ## The Container Class ## The Container class provides a named repository for nodes. It is identified by a name, which must uniquely identify the Container across all Containers in the messaging domain. TBD: locking - allow thread-per-connection functionality, would need locking for container management of connections!! ### Container Methods ### `Container(name, properties)` Construct a container. Parameters: * __name__ - string, an identifier for the new container, __MUST__ be unique across the entire messaging domain. * __properties__ - map, contents TBD `Container.create_connection(name, ConnectionEventHandler, properties)` The factory for Connection objects. Use this to create a connection to a peer Container. Your application must create a unique Connection object for each network connection it makes (eg. per-socket, in the case of TCP). Parameters: * __name__ - string, name of this Connection. The name __MUST__ uniquely identify this connection within the Container - no two Connections within a Container can share the same name. * __ConnectionEventHandler__ - object, provides callback handlers for the new Connection (see below). * __properties__ - map containing the following optional connection attributes: * "hostname" - string, DNS name of __remote__ host (ie. the host that is being connected to). This name will also be used by the SSL layer to check the CommonName/SAN contained in the certificate provided by the peer. * "idle-time-out" - integer, time in seconds before the Connection is closed due to lack of traffic. Setting this may enable heartbeat generation by the peer, if supported. * "x-trace-protocol" - boolean, if True, enable debug dumps of the AMQP wire traffic. 
* "x-server" - boolean, set this to True to configure the connection as a server side connection. This should be set True if the connection was remotely initiated (e.g. accept on a listening socket). If the connection was locally initiated (e.g. by calling connect()), then this value should be set to False. This setting is used by authentication and encryption to configure the connection's role. The default value is False for client mode. * "x-username" - string, the client's username to use when authenticating with a server. * "x-password" - string, the client's password, used for authentication. * "x-require-auth" - boolean, reject remotely-initiated client connections that fail to provide valid credentials for authentication. * "x-sasl-mechs" - string, a space-separated list of mechanisms that are allowed for authentication. Defaults to "ANONYMOUS" * "x-ssl-ca-file" - string, path to a PEM file containing the certificates of the trusted Certificate Authorities that will be used to check the signature of the peer's certificate. * "x-ssl-server" - __DEPRECATED__ use x-server instead. * "x-ssl-identity" - tuple, contains self-identifying certificate information which will be presented to the peer. The first item in the tuple is the path to the certificate file (PEM format). The second item is the path to a file containing the private key used to sign the certificate (PEM format, optional if private key is stored in the certificate itself). The last item is the password used to encrypt the private key (string, not required if private key is not encrypted) * "x-ssl-verify-mode" - string, configure the level of security provided by SSL. 
Possible values: * "verify-peer" (default) - most secure, requires peer to supply a certificate signed by a valid CA (see x-ssl-ca-file), and checks the CN or SAN entry in the certificate against the expected peer hostname (see x-ssl-peer-name) * "verify-cert" (default if no hostname or x-ssl-peer-name given) - like verify-peer, but skips the check of the peer hostname. Vulnerable to man-in-the-middle attack. * "no-verify" - do not require the peer to provide a certificate. Results in a weaker encryption stream, and other vulnerabilities. * "x-ssl-peer-name" - string, DNS name of peer. Can be used to override the value passed in by the "hostname" option, if necessary. A DNS host name is required to authenticate peer's certificate (see x-ssl-verify-mode). * "x-ssl-allow-cleartext" - boolean, allows clients to connect without using SSL (eg, plain TCP). Used by a server that will accept clients requesting either trusted or untrusted connections. `Container.name()` Returns the name of the Container. `Container.need_processing()` A utility to help determine which Connections need processing. Returns a triple of lists containing those connections that: * need to read from the network (index 0) * need to write to the network (index 1) * waiting for their *next-tick* timer to expire (see *Connection.process()*). (index 2) The timer list is sorted with the Connection next expiring at index 0. `Container.get_connection(name)` Returns the Connection instance identified by *name*. ## The Connection Class ## A Connection is created from the Container that it is going to 'connect'. See the *create_connection()* Container method. `Connection.name()` Returns the name of the Connection. `Connection.remote_container()` Returns the name of the remote container. This name is only available once the Connection has become Active. `Connection.user_context` A opaque handle that can be set by the application for its own per-Connection data. 
`Connection.open()`

Initiate the connection to the remote peer.  This must be called in order to
transfer data over the Connection.  The Connection is considered active once
both peers have opened it.

`Connection.close()`

Terminate the connection to the remote peer.  This should be called when the
application is done with the Connection and wants to perform a clean close.
The Connection is considered closed when both peers have closed it.

`Connection.closed()`

True when both peers have completed closing the Connection.

`Connection.destroy()`

This releases the Connection and all links that use the connection.  This
must be called to release the resources used by the Connection.  Once called
the Connection is no longer present - the application should drop all
references to the destroyed Connection.

`Connection.process(now)`

This causes the Connection to run the AMQP protocol state machine.  This
method must be called periodically (see *Connection.next_tick*) and whenever
network I/O has been done on the connection (see below).  Event callbacks may
occur when this method is invoked.  The *now* parameter is the current time
(format determined by the platform).  This method returns a timestamp which
is the maximum time interval the application can wait before it must call
Connection.process() again.  If Connection.process() is not called at or
before this deadline the Connection may fail.

`Connection.next_tick()`

Returns the deadline for the next call to Connection.process().  This is the
same value that was returned by the last call to *Connection.process()*.

`Connection.needs_input()`

Returns the number of bytes of inbound network data this Connection is
capable of consuming.  Returns zero if no input can be processed at this
time.  Returns `EOS` when the input pipe has been closed.

`Connection.process_input(data)`

Process data read from the network.  Returns the number of bytes from `data`
that have been processed, which will be no less than the last value returned
from `Connection.needs_input()`.  
Returns EOS if the input pipe has been closed.  The application should call
Connection.process() after calling this method.

`Connection.input_closed(reason)`

The application must call this method when the inbound network data source
has closed.  This indicates that no more data will arrive for this
Connection.  The application should call Connection.process() after calling
this method.

`Connection.has_output()`

Returns the number of bytes of output data the Connection has buffered.
This data needs to be written to the network.  Returns zero when no pending
output is available.  Returns EOS when the output pipe has been closed.  The
application should call Connection.process() prior to calling this method.

`Connection.output_data()`

Returns a buffer containing data that needs to be written to the network.
Returns None if no data or the output pipe has been closed.

`Connection.output_written(N)`

The application must call this to notify the framework that N bytes of
output data (as given by `Connection.output_data()`) have been written to
the network.  This will cause the framework to release the first N bytes
from the buffered output data.

`Connection.output_closed()`

The application must call this method when the outbound network data pipe
has closed.  This indicates that no more data can be written to the network.

`Connection.create_sender(source_address, target_address,
SenderEventHandler, name, properties)`

Construct a SenderLink over this Connection which will send messages to the
node identified by *target_address* on the remote.  If *target_address* is
None the remote may create a node for this link.  The target address of a
dynamically-created node will be made available via the SenderLink once the
link is active.

Parameters:

* **source_address** - string, address of the local node that is generating
  the messages sent on this link.
* **target_address** - string or None, address of the destination node that
  is to consume the sent messages.  
May be None if the remote can dynamically allocate a node for consuming the messages. * __SenderEventHandler__ - a set of callbacks for monitoring the state of the link. See below. * __name__ - string, optional name for the created link, __MUST__ be unique across all SenderLinks on this Connection. * __properties__ - map of optional properties to apply to the link: * "distribution-mode" - informs the receiver of the distribution mode of messages supplied by this link (see the AMQP 1.0 specification for details). Values: * "move" - the message will not be available for other consumers once it has been accepted by the remote. This implies that a message will be consumed by only one consumer. * "copy" - the message will continue to be available for other consumers after it has been accepted by the peer. This implies that multiple consumers may get a copy of the same message. `Connection.accept_sender(handle, source_override, SenderEventHandler, properties)` This constructs a SenderLink in response to a request from the peer to consume from a node. When a peer wants to consume messages from a local node it will request that the application create a SenderLink on its behalf. This SenderLink will be used to transfer the messages to the peer. The application is notified of such a request via the *sender_requested* Connection callback (see below). After receiving this notification the application may grant the request by calling this method. Parameters: * __handle__ - opaque handle provided by the Connection.sender_requested() callback. This handle is used by the framework to correlate the created SenderLink with the request from the peer. * **source_override** - string, the address of the source node. This allows the application to supply (or override) the source address requested by the peer. * __SenderEventHandler__ - object containing callbacks for events generated by this SenderLink (see below). * __properties__ - map of properties used by the SenderLink. 
Same values as supplied to the *create_sender* method. Values in this map will override any properties requested by the peer. `Connection.reject_sender(handle, reason)` Called by the application to reject a request from the peer to create a SenderLink. This should be called instead of *accept_sender()* if the application wants to deny the peer's request. The application is notified of such a request via the *sender_requested* Connection callback (see below). Parameters: * __handle__ - the opaque handle provided by the *Connection.sender_requested()* callback. * __reason__ - **TBD** `Connection.create_receiver( target_address, source_address, ReceiverEventHandler, name, properties)` Construct a ReceiverLink over this Connection which will consume messages from the node identified by *source_address* on the remote. If *source_address* is None the remote may create a node for this link. The source address of the dynamically-created node will be made available via the ReceiverLink once the link is active. Parameters: * **target_address** - string address of the local node that is consuming the messages arriving on the link. * **source_address** - string or None, address of the remote node that is to supply the messages arriving on the link. May be None if the remote can dynamically allocate a node for the source. * __receiverEventHandler__ - object containing a set of callbacks for receiving messages and monitoring the state of the link. See below. * __properties__ - map of optional properties to apply to the link: * "distribution-mode" - Requests the distribution mode that the peer's SenderLink should use when supplying messages. This is merely a request and can be overridden by the peer. Values are the same as given for *Connection.create_sender()* `Connection.accept_receiver(handle, target_override, ReceiverEventHandler, properties)` This constructs a ReceiverLink in response to a request from the peer to send messages to a node. 
When a peer wants to send messages to a local node it will request that the application create a ReceiverLink on its behalf. This ReceiverLink will be used to consume the messages sent by the peer. The application is notified of such a request via the *receiver_requested* Connection callback (see below). After receiving this notification the application may grant the request by calling this method. Parameters: * __handle__ - opaque handle provided by the Connection.receiver_requested() callback. This handle is used by the framework to correlate the created ReceiverLink with the request from the peer. * __target_override__ - string, the address of the target node. This allows the application to supply (or override) the target address requested by the peer. * __ReceiverEventHandler__ - object containing callbacks for events generated by this ReceiverLink (see below). * __properties__ - map of properties used by the ReceiverLink. **TBD** `Connection.reject_receiver(handle, reason)` Called by the application to reject a request from the peer to create a ReceiverLink. This should be called instead of *accept_receiver()* if the application wants to deny the peer's request. The application is notified of such a request via the *receiver_requested* Connection callback (see below). Parameters: * __handle__ - the opaque handle provided by the *Connection.receiver_requested()* callback. * __reason__ - **TBD** ### Connection Events ### Callbacks can be registered with an instance of a Connection object. These callbacks are invoked on the following events: * The Connection becomes active. * The peer has requested that the Connection be closed. * The Connection has closed. * The Connection has experienced a failure. * The peer wants to consume from a local node. * The peer wants to send messages to a local node. * SASL authentication Callbacks for these events are provided by the application via the ConnectionEventHandler object. 
The following callbacks are defined for a Connection: `connection_active(Connection)` The Connection has transitioned to the Active state. This means that both ends of the Connection can send and receive data. `connection_closed(Connection)` The Connection has closed cleanly. This event is generated as a result of both ends of the Connection being closed via the *close()* method. `connection_remote_closed(Connection, error)` Indicates that the peer has issued a *close()* on the connection. **TBD** error `connection_failed(Connection, error)` Indicates that the connection has failed. No further message passing is possible and all contained links are dead. At this point the application has no choice but to destroy the Connection. **TBD** error `sender_requested(Connection, handle, name, requested_source, properties)` The peer has requested that a SenderLink be created so it can consume messages from a node in the local Container. The *requested_source* is the address of the (source) node that the peer wishes to consume messages from. The application may accept the request by calling *Connection.accept_sender()* or deny the request by calling *Connection.reject_sender()* `receiver_requested(Connection, handle, name, requested_target, properties)` The peer needs to send messages to a node in the local container and is asking your application to create a ReceiverLink on its behalf. The intent of this ReceiverLink is to consume the incoming messages from the peer. The *requested_target* is the address of the (target) node that the peer wishes to send messages to. The application may accept the request by calling *Connection.accept_receiver()* or deny the request by calling *Connection.reject_receiver()* `sasl_step(Connection, pn_sasl)` Invoked each time the SASL negotiation transfers information. See the Proton API for more detail. `sasl_done(Connection, result)` Invoked on completion of the SASL handshake. See the Proton API for more detail. 
## The SenderLink Class ## A SenderLink is created from the Connection that connects to remote Container that holdes the target node. See the *Connection.create_sender()* and *Connection.accept_sender()* methods. `SenderLink.name()` Returns the name of the SenderLink. `SenderLink.user_context` A opaque handle that can be set by the application for its own per-SenderLink data. `SenderLink.open()` A SenderLink must be opened before it will entry the Active state and messages can be sent. `SenderLink.source_address()` Returns the address of the local node that is the source of the sent messages. `SenderLink.target_address()` Returns the address of the node in the peer's Container that will accept the sent messages. Note that this address is not final until the SenderLink has reached the Active state. `SenderLink.close()` Initiate a close of the SenderLink. `SenderLink.closed()` Returns True when the SenderLink has closed. `SenderLink.destroy()` This releases the SenderLink. This must be called to release the resources used by the SenderLink. Once called the SenderLink is no longer present - the application should drop all references to the destroyed SenderLink. `SenderLink.send(message, delivery_callback, handle, deadline)` Queue a message for sending over the link. *Message* is a Proton Message object. If there is no need to know the delivery status of the message at the peer then *delivery_callback*, *handle*, and *deadline* should not be provided. In this case, the message will be sent *pre-settled*. To get notification on the delivery status of the message a *delivery_callback* and *handle* must be supplied. The *deadline* is optional in this case. This method returns 0 if the message was queued successfully (and the *delivery_callback*, if supplied, is guaranteed to be invoked). Otherwise an error occurred and the message was not queued and no callback will be made. 
Parameters: * __message__ - a complete Proton Message object * __handle__ - opaque object supplied by application. Passed to the *delivery_callback* method. * __deadline__ - future timestamp when the send should be aborted if it has not completed. In seconds since Epoch. * **`delivery_callback`** - an optional method that will be invoked when the delivery status of the message has reached a terminal state. The callback accepts the following parameters: * __SenderLink__ - the link over which the message was sent. * __handle__ - the handle provided in the *send()* call. * __status__ - the status of the delivery. It will be one of the following values: * `TIMED_OUT` - the delivery did not reach a terminal state before the deadline expired. Whether or not the message was actually received is unknown. * `ACCEPTED` - the remote has received and accepted the message. See `ReceiverLink.message_accepted()` * `REJECTED` - the remote has received but has rejected the message. See `ReceiverLink.message_rejected()` * `RELEASED` - the remote has received but will not accept the message. See `ReceiverLink.message_released()` * `ABORTED` - Connection or SenderLink has been closed/destroyed/failed, etc. * `UNKNOWN` - the remote did not provide a delivery status. * `MODIFIED` - **RESERVED** **TBD** * __error__ - if status is ABORTED, an error code is provided **TBD** * __outcome__ - **RESERVED** **TBD** `SenderLink.pending()` Returns the number of outging messages in the process of being sent. `SenderLink.credit()` Returns the number of messages the remote ReceiverLink has permitted the SenderLink to send. `SenderLink.flushed()` **TBD** ### SenderLink Events ### Callbacks can be registered with an instance of a SenderLink object. These callbacks are invoked on the following events: * The link becomes active. * The peer has initiated the close of the link. * The link has closed. * The peer has made credit available to the sender. 
Callbacks for these events are provided by the application via the SenderEventHandler objects. The following callback methods are defined for a SenderLink: `sender_active(SenderLink)` Called when the link open has completed and the SenderLink is active. `sender_closed(SenderLink)` Called when SenderLink has closed. `sender_remote_closed(SenderLink, error)` Indicates that the peer has issued a *close()* on the link. **TBD** error `credit_granted(SenderLink)` Indicates that the peer has made credit available. The current credit value can be determined via the *SenderLink.credit()* method. **TBD** - invoked only when credit transitions from <= 0 to > 0??? `flush(SenderLink)` **TBD** ## The ReceiverLink Class ## A ReceiverLink is created from the Connection that connects to the remote Container which holds the source node. See the *Connection.create_receiver()* and *Connection.accept_receiver()* methods. `ReceiverLink.name()` Returns the name of the ReceiverLink. `ReceiverLink.user_context` A opaque handle that can be set by the application for its own per-ReceiverLink data. `ReceiverLink.open()` A ReceiverLink must be opened before it will entry the Active state and messages can be sent. `ReceiverLink.source_address()` Returns the address of the node in the peer's Container that is the source of received messages. Note that this address is not final until the ReceiverLink has reached the Active state. `ReceiverLink.target_address()` Returns the address of the local node that will accept the received messages. `ReceiverLink.close()` Initiate a close of the ReceiverLink. `ReceiverLink.closed()` Returns True when the ReceiverLink has closed. `ReceiverLink.destroy()` This releases the ReceiverLink. This must be called to release the resources used by the ReceiverLink. Once called the ReceiverLink is no longer present - the application should drop all references to the destroyed ReceiverLink. 
`ReceiverLink.capacity()` Returns the number of messages the ReceiverLink is able to queue locally before back-pressuring the sender. Capacity decreases by one each time a message arrives. Capacity is initialized to zero when the ReceiverLink is first created - the application must call *add_capacity()* in order to allow the peer to send messages. `ReceiverLink.add_capacity(N)` Increases the credit made available to the peer by N messages. Must be called by application to replenish the sender's credit as messages arrive. `ReceiverLink.flush()` **TBD** `ReceiverLink.message_accepted( handle )` Indicate to the remote that the message identified by *handle* has been successfully processed by the application. See the *message_received* callback below, as well as the *SenderLink.send()* method. `ReceiverLink.message_rejected( handle, outcome )` Indicate to the remote that the message identified by *handle* is considered invalid and cannot be processed by the application. See the *message_received* callback below, as well as the *SenderLink.send()* method. Outcome **TBD** `ReceiverLink.message_released( handle )` Indicate to the remote that the message identified by *handle* will not be processed by the application and should be made available for other consumers. See the *message_received* callback below, as well as the *SenderLink.send()* method. `ReceiverLink.message_modified( handle, outcome )` Indicate to the remote that the message identified by *handle* was modified by the application but not processed. See the *message_received* callback below, as well as the *SenderLink.send()* method. Outcome **TBD** ### ReceiverLink Events ### Callbacks can be registered with an instance of a ReceiverLink object. These callbacks are invoked on the following events: * The link has become active. * The peer has initiated the close of the link. * The link has closed. * A message has arrived from the sender. 
Callbacks for these events are provided by the application via the ReceiverEventHandler objects. The following callback methods are defined for a ReceiverLink: `receiver_active(ReceiverLink)` Called when the link open has completed and the ReceiverLink is active. `receiver_closed(ReceiverLink)` Called when ReceiverLink has closed. `receiver_remote_closed(ReceiverLink, error)` Indicates that the peer has issued a *close()* on the link. **TBD** error `message_received(ReceiverLink, Message, handle)` Called when a Proton Message has arrived on the link. Use *handle* to indicate whether the message has been accepted or not by calling the appropriate method (*message_accepted*, *message_rejected*, etc) The capacity of the link will be decremented by one on return from this callback. Parameters: * __ReceiverLink__ - link which received the Message * __Message__ - a complete Proton Message * __handle__ - opaque handle used by framework to coordinate the message's receive status. `remote_flushed(ReceiverLink)` **TBD** pyngus-2.2.1/setup.py0000664003420400342040000000467013107111560015737 0ustar kgiustikgiusti00000000000000#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import os

from setuptools import setup

# Keep in sync with pyngus/__init__.py when bumping the release.
_VERSION = "2.2.1"

# Some distributions ship the proton python module without registering it
# via setup.py.  In that situation pip cannot tell that proton is present
# and would install its own copy on top of the distro's files.  To avoid
# clobbering a pre-existing installation, only declare the dependency when
# the module is genuinely unavailable.
_dependencies = []
try:
    import proton  # noqa: F401  (availability probe only)
except ImportError:
    # Pulling in this package also downloads and builds the proton
    # shared library:
    _dependencies = ['python-qpid-proton>=0.9,<0.18']

setup(name="pyngus",
      version=_VERSION + os.environ.get('PYNGUS_VERSION_SUFFIX', ''),
      author="kgiusti",
      author_email="kgiusti@apache.org",
      packages=["pyngus"],
      package_dir={"pyngus": "pyngus"},
      description="Callback API implemented over Proton",
      url="https://github.com/kgiusti/pyngus",
      license="Apache Software License",
      install_requires=_dependencies,
      classifiers=["License :: OSI Approved :: Apache Software License",
                   "Intended Audience :: Developers",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python",
                   "Programming Language :: Python :: 2",
                   "Programming Language :: Python :: 2.7",
                   "Programming Language :: Python :: 3",
                   "Programming Language :: Python :: 3.3",
                   "Programming Language :: Python :: 3.4",
                   "Programming Language :: Python :: 3.5"])
Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 pyngus-2.2.1/setup.cfg0000664003420400342040000000007313107111745016044 0ustar kgiustikgiusti00000000000000[egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 pyngus-2.2.1/pyngus.egg-info/0000775003420400342040000000000013107111745017242 5ustar kgiustikgiusti00000000000000pyngus-2.2.1/pyngus.egg-info/dependency_links.txt0000664003420400342040000000000113107111744023307 0ustar kgiustikgiusti00000000000000 pyngus-2.2.1/pyngus.egg-info/PKG-INFO0000664003420400342040000000136313107111744020341 0ustar kgiustikgiusti00000000000000Metadata-Version: 1.1 Name: pyngus Version: 2.2.1 Summary: Callback API implemented over Proton Home-page: https://github.com/kgiusti/pyngus Author: kgiusti Author-email: kgiusti@apache.org License: Apache Software License Description: UNKNOWN Platform: UNKNOWN Classifier: License :: OSI Approved :: Apache Software License Classifier: Intended Audience :: Developers Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 pyngus-2.2.1/pyngus.egg-info/top_level.txt0000664003420400342040000000000713107111744021770 0ustar kgiustikgiusti00000000000000pyngus pyngus-2.2.1/pyngus.egg-info/SOURCES.txt0000664003420400342040000000044113107111745021125 0ustar kgiustikgiusti00000000000000LICENSE MANIFEST.in README.md setup.py docs/User-Guide.md pyngus/__init__.py pyngus/connection.py pyngus/container.py pyngus/endpoint.py pyngus/link.py 
pyngus/sockets.py pyngus.egg-info/PKG-INFO pyngus.egg-info/SOURCES.txt pyngus.egg-info/dependency_links.txt pyngus.egg-info/top_level.txtpyngus-2.2.1/LICENSE0000664003420400342040000002607512561257620015250 0ustar kgiustikgiusti00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.