gear-0.4.0/
gear-0.4.0/setup.py
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import setuptools

setuptools.setup(
    setup_requires=['pbr>=0.5.21,<1.0'],
    pbr=True)

gear-0.4.0/gear/
gear-0.4.0/gear/cmd/
gear-0.4.0/gear/cmd/__init__.py
gear-0.4.0/gear/cmd/geard.py
#!/usr/bin/env python
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse import daemon import extras import gear import logging import os import pbr.version import signal import sys pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile']) class Server(object): def __init__(self): self.args = None self.config = None self.gear_server_pid = None def parse_arguments(self): parser = argparse.ArgumentParser(description='Gearman server.') parser.add_argument('-d', dest='nodaemon', action='store_true', help='do not run as a daemon') parser.add_argument('-p', dest='port', default=4730, help='port on which to listen') parser.add_argument('--log-config', dest='log_config', help='logging config file') parser.add_argument('--pidfile', dest='pidfile', default='/var/run/geard/geard.pid', help='PID file') parser.add_argument('--ssl-ca', dest='ssl_ca', metavar='PATH', help='path to CA certificate') parser.add_argument('--ssl-cert', dest='ssl_cert', metavar='PATH', help='path to SSL public certificate') parser.add_argument('--ssl-key', dest='ssl_key', metavar='PATH', help='path to SSL private key') parser.add_argument('--version', dest='version', action='store_true', help='show version') self.args = parser.parse_args() def setup_logging(self): if self.args.log_config: if not os.path.exists(self.args.log_config): raise Exception("Unable to read logging config file at %s" % self.args.log_config) logging.config.fileConfig(self.args.log_config) else: if self.args.nodaemon: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO, filename="/var/log/geard/geard.log") def main(self): self.server = gear.Server(self.args.port, self.args.ssl_key, self.args.ssl_cert, self.args.ssl_ca) signal.pause() def main(): server = Server() server.parse_arguments() if server.args.version: vi = pbr.version.VersionInfo('gear') print("Gear version: {}".format(vi.version_string())) sys.exit(0) server.setup_logging() if server.args.nodaemon: server.main() else: pid = pid_file_module.TimeoutPIDLockFile(server.args.pidfile, 10) with daemon.DaemonContext(pidfile=pid): server.main() if __name__ == "__main__": sys.path.insert(0, '.') main() gear-0.4.0/gear/constants.py0000664000175300017540000000357612207146231017103 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Protocol Constants ================== These are not necessary for normal API usage. See the `Gearman protocol reference `_ for an explanation of each of these. Magic Codes ----------- .. py:data:: REQ The Gearman magic code for a request. .. py:data:: RES The Gearman magic code for a response. 
Packet Types ------------ """ types = { 1: 'CAN_DO', 2: 'CANT_DO', 3: 'RESET_ABILITIES', 4: 'PRE_SLEEP', #unused 6: 'NOOP', 7: 'SUBMIT_JOB', 8: 'JOB_CREATED', 9: 'GRAB_JOB', 10: 'NO_JOB', 11: 'JOB_ASSIGN', 12: 'WORK_STATUS', 13: 'WORK_COMPLETE', 14: 'WORK_FAIL', 15: 'GET_STATUS', 16: 'ECHO_REQ', 17: 'ECHO_RES', 18: 'SUBMIT_JOB_BG', 19: 'ERROR', 20: 'STATUS_RES', 21: 'SUBMIT_JOB_HIGH', 22: 'SET_CLIENT_ID', 23: 'CAN_DO_TIMEOUT', 24: 'ALL_YOURS', 25: 'WORK_EXCEPTION', 26: 'OPTION_REQ', 27: 'OPTION_RES', 28: 'WORK_DATA', 29: 'WORK_WARNING', 30: 'GRAB_JOB_UNIQ', 31: 'JOB_ASSIGN_UNIQ', 32: 'SUBMIT_JOB_HIGH_BG', 33: 'SUBMIT_JOB_LOW', 34: 'SUBMIT_JOB_LOW_BG', 35: 'SUBMIT_JOB_SCHED', 36: 'SUBMIT_JOB_EPOCH', } for i, name in types.items(): globals()[name] = i __doc__ += '\n.. py:data:: %s\n' % name REQ = b'\x00REQ' RES = b'\x00RES' gear-0.4.0/gear/tests/0000775000175300017540000000000012207146270015647 5ustar jenkinsjenkins00000000000000gear-0.4.0/gear/tests/test_gear.py0000664000175300017540000000245612207146231020202 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import testscenarios import gear from gear import tests class ConnectionTestCase(tests.BaseTestCase): scenarios = [ ('both_string', dict(host="hostname", port='80')), ('string_int', dict(host="hostname", port=80)), ('none_string', dict(host=None, port="80")), ] def setUp(self): super(ConnectionTestCase, self).setUp() self.conn = gear.Connection(self.host, self.port) def test_params(self): self.assertTrue(repr(self.conn).endswith( 'host: %s port: %s>' % (self.host, self.port))) def load_tests(loader, in_tests, pattern): return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern) gear-0.4.0/gear/tests/__init__.py0000664000175300017540000000354712207146231017766 0ustar jenkinsjenkins00000000000000# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common utilities used in testing""" import os import fixtures import testresources import testtools TRUE_VALUES = ('true', '1', 'yes') class BaseTestCase(testtools.TestCase, testresources.ResourcedTestCase): def setUp(self): super(BaseTestCase, self).setUp() test_timeout = os.environ.get('OS_TEST_TIMEOUT', 30) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid, fail hard. 
print("OS_TEST_TIMEOUT set to invalid value" " defaulting to no timeout") test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) if os.environ.get('OS_STDOUT_CAPTURE') in TRUE_VALUES: stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if os.environ.get('OS_STDERR_CAPTURE') in TRUE_VALUES: stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.useFixture(fixtures.FakeLogger()) self.useFixture(fixtures.NestedTempfile()) gear-0.4.0/gear/__init__.py0000664000175300017540000024425112207146231016623 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os import select import socket import ssl import struct import threading import time import uuid as uuid_module from gear import constants try: import Queue as queue except ImportError: import queue as queue PRECEDENCE_NORMAL = 0 PRECEDENCE_LOW = 1 PRECEDENCE_HIGH = 2 class ConnectionError(Exception): pass class InvalidDataError(Exception): pass class ConfigurationError(Exception): pass class NoConnectedServersError(Exception): pass class UnknownJobError(Exception): pass class InterruptedError(Exception): pass class TimeoutError(Exception): pass class GearmanError(Exception): pass def convert_to_bytes(data): try: data = data.encode('utf8') except AttributeError: pass return data class Task(object): def __init__(self): self._wait_event = threading.Event() def setComplete(self): self._wait_event.set() def wait(self, timeout=None): """Wait for a response from Gearman. :arg int timeout: If not None, return after this many seconds if no response has been received (default: None). """ self._wait_event.wait(timeout) return self._wait_event.is_set() class SubmitJobTask(Task): def __init__(self, job): super(SubmitJobTask, self).__init__() self.job = job class OptionReqTask(Task): pass class Connection(object): """A Connection to a Gearman Server.""" log = logging.getLogger("gear.Connection") def __init__(self, host, port, ssl_key=None, ssl_cert=None, ssl_ca=None): self.host = host self.port = port self.ssl_key = ssl_key self.ssl_cert = ssl_cert self.ssl_ca = ssl_ca self.echo_lock = threading.Lock() self._init() def _init(self): self.conn = None self.connected = False self.connect_time = None self.related_jobs = {} self.pending_tasks = [] self.admin_requests = [] self.echo_conditions = {} self.options = set() self.changeState("INIT") def changeState(self, state): # The state variables are provided as a convenience (and used by # the Worker implementation). They aren't used or modified within # the connection object itself except to reset to "INIT" immediately # after reconnection. self.log.debug("Setting state to: %s" % state) self.state = state self.state_time = time.time() def __repr__(self): return '' % ( id(self), self.host, self.port) def connect(self): """Open a connection to the server. 
:raises ConnectionError: If unable to open the socket. """ self.log.debug("Connecting to %s port %s" % (self.host, self.port)) s = None for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: s = socket.socket(af, socktype, proto) except socket.error: s = None continue if all([self.ssl_key, self.ssl_cert, self.ssl_ca]): self.log.debug("Using SSL") s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1, cert_reqs=ssl.CERT_REQUIRED, keyfile=self.ssl_key, certfile=self.ssl_cert, ca_certs=self.ssl_ca) try: s.connect(sa) except socket.error: s.close() s = None continue break if s is None: self.log.debug("Error connecting to %s port %s" % ( self.host, self.port)) raise ConnectionError("Unable to open socket") self.log.debug("Connected to %s port %s" % (self.host, self.port)) self.conn = s self.connected = True self.connect_time = time.time() def disconnect(self): """Disconnect from the server and remove all associated state data. """ if self.conn: try: self.conn.close() except Exception: pass self.log.debug("Disconnected from %s port %s" % (self.host, self.port)) self._init() def reconnect(self): """Disconnect from and reconnect to the server, removing all associated state data. """ self.disconnect() self.connect() def sendPacket(self, packet): """Send a packet to the server. :arg Packet packet: The :py:class:`Packet` to send. """ self.log.debug("Sending packet: %s" % packet) self.conn.send(packet.toBinary()) def _getAdminRequest(self): return self.admin_requests.pop(0) def readPacket(self): """Read one packet or administrative response from the server. Blocks until the complete packet or response is read. :returns: The :py:class:`Packet` or :py:class:`AdminRequest` read. :rtype: :py:class:`Packet` or :py:class:`AdminRequest` """ packet = b'' datalen = 0 code = None ptype = None admin = None admin_request = None while True: c = self.conn.recv(1) if not c: return None if admin is None: if c == b'\x00': admin = False else: admin = True admin_request = self._getAdminRequest() packet += c if admin: if admin_request.isComplete(packet): return admin_request else: if len(packet) == 12: code, ptype, datalen = struct.unpack('!4sii', packet) if len(packet) == datalen + 12: return Packet(code, ptype, packet[12:], connection=self) def sendAdminRequest(self, request, timeout=90): """Send an administrative request to the server. :arg AdminRequest request: The :py:class:`AdminRequest` to send. :arg numeric timeout: Number of seconds to wait until the response is received. If None, wait forever (default: 90 seconds). :raises TimeoutError: If the timeout is reached before the response is received. """ self.admin_requests.append(request) self.conn.send(request.getCommand()) complete = request.waitForResponse(timeout) if not complete: raise TimeoutError() def echo(self, data=None, timeout=30): """Perform an echo test on the server. This method waits until the echo response has been received or the timeout has been reached. :arg bytes data: The data to request be echoed. If None, a random unique byte string will be generated. :arg numeric timeout: Number of seconds to wait until the response is received. If None, wait forever (default: 30 seconds). :raises TimeoutError: If the timeout is reached before the response is received. 
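# Illustrative usage sketch, not part of this file: a round-trip health check
# using Connection.echo() described above.  The server name is an example
# value.
import gear

client = gear.Client()
client.addServer('gearman.example.com')
client.waitForServer()
client.getConnection().echo(b'health-check', timeout=10)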
""" if data is None: data = uuid_module.uuid4().hex.encode('utf8') self.echo_lock.acquire() try: if data in self.echo_conditions: raise InvalidDataError("This client is already waiting on an " "echo response of: %s" % data) condition = threading.Condition() self.echo_conditions[data] = condition finally: self.echo_lock.release() self.sendEchoReq(data) condition.acquire() condition.wait(timeout) condition.release() if data in self.echo_conditions: return data raise TimeoutError() def sendEchoReq(self, data): p = Packet(constants.REQ, constants.ECHO_REQ, data) self.sendPacket(p) def handleEchoRes(self, data): condition = None self.echo_lock.acquire() try: condition = self.echo_conditions.get(data) if condition: del self.echo_conditions[data] finally: self.echo_lock.release() if not condition: return False condition.notifyAll() return True def handleOptionRes(self, option): self.options.add(option) class AdminRequest(object): """Encapsulates a request (and response) sent over the administrative protocol. This is a base class that may not be instantiated dircectly; a subclass implementing a specific command must be used instead. :arg list arguments: A list of byte string arguments for the command. The following instance attributes are available: **response** (bytes) The response from the server. **arguments** (bytes) The argument supplied with the constructor. **command** (bytes) The administrative command. """ log = logging.getLogger("gear.AdminRequest") command = None arguments = [] response = None def __init__(self, *arguments): self.wait_event = threading.Event() self.arguments = arguments if type(self) == AdminRequest: raise NotImplementedError("AdminRequest must be subclassed") def __repr__(self): return '' % ( id(self), self.command) def getCommand(self): cmd = self.command if self.arguments: cmd += b' ' + b' '.join(self.arguments) cmd += b'\n' return cmd def isComplete(self, data): if (data[-3:] == b'\n.\n' or data[-5:] == b'\r\n.\r\n' or data == b'.\n' or data == b'.\r\n'): self.response = data return True return False def setComplete(self): self.wait_event.set() def waitForResponse(self, timeout=None): self.wait_event.wait(timeout) return self.wait_event.is_set() class StatusAdminRequest(AdminRequest): """A "status" administrative request. The response from gearman may be found in the **response** attribute. """ command = b'status' def __init__(self): super(StatusAdminRequest, self).__init__() class ShowJobsAdminRequest(AdminRequest): """A "show jobs" administrative request. The response from gearman may be found in the **response** attribute. """ command = b'show jobs' def __init__(self): super(ShowJobsAdminRequest, self).__init__() class ShowUniqueJobsAdminRequest(AdminRequest): """A "show unique jobs" administrative request. The response from gearman may be found in the **response** attribute. """ command = b'show unique jobs' def __init__(self): super(ShowUniqueJobsAdminRequest, self).__init__() class CancelJobAdminRequest(AdminRequest): """A "cancel job" administrative request. :arg str handle: The job handle to be canceled. The response from gearman may be found in the **response** attribute. """ command = b'cancel job' def __init__(self, handle): handle = convert_to_bytes(handle) super(CancelJobAdminRequest, self).__init__(handle) def isComplete(self, data): if data[-1:] == b'\n': self.response = data return True return False class VersionAdminRequest(AdminRequest): """A "version" administrative request. The response from gearman may be found in the **response** attribute. 
""" command = b'version' def __init__(self): super(VersionAdminRequest, self).__init__() def isComplete(self, data): if data[-1:] == b'\n': self.response = data return True return False class WorkersAdminRequest(AdminRequest): """A "workers" administrative request. The response from gearman may be found in the **response** attribute. """ command = b'workers' def __init__(self): super(WorkersAdminRequest, self).__init__() class Packet(object): """A data packet received from or to be sent over a :py:class:`Connection`. :arg bytes code: The Gearman magic code (:py:data:`constants.REQ` or :py:data:`constants.RES`) :arg bytes ptype: The packet type (one of the packet types in constasts). :arg bytes data: The data portion of the packet. :arg Connection connection: The connection on which the packet was received (optional). :raises InvalidDataError: If the magic code is unknown. """ log = logging.getLogger("gear.Packet") def __init__(self, code, ptype, data, connection=None): if not isinstance(code, bytes) and not isinstance(code, bytearray): raise TypeError("code must be of type bytes or bytearray") if code[0:1] != b'\x00': raise InvalidDataError("First byte of packet must be 0") self.code = code self.ptype = ptype if not isinstance(data, bytes) and not isinstance(data, bytearray): raise TypeError("data must be of type bytes or bytearray") self.data = data self.connection = connection def __repr__(self): ptype = constants.types.get(self.ptype, 'UNKNOWN') return '' % (id(self), ptype) def toBinary(self): """Return a Gearman wire protocol binary representation of the packet. :returns: The packet in binary form. :rtype: bytes """ b = struct.pack('!4sii', self.code, self.ptype, len(self.data)) b = bytearray(b) b += self.data return b def getArgument(self, index, last=False): """Get the nth argument from the packet data. :arg int index: The argument index to look up. :arg bool last: Whether this is the last argument (and thus nulls should be ignored) :returns: The argument value. :rtype: bytes """ parts = self.data.split(b'\x00') if not last: return parts[index] return b'\x00'.join(parts[index:]) def getJob(self): """Get the :py:class:`Job` associated with the job handle in this packet. :returns: The :py:class:`Job` for this packet. :rtype: Job :raises UnknownJobError: If the job is not known. 
""" handle = self.getArgument(0) job = self.connection.related_jobs.get(handle) if not job: raise UnknownJobError() return job class BaseClientServer(object): log = logging.getLogger("gear.BaseClientServer") def __init__(self): self.running = True self.active_connections = [] self.inactive_connections = [] self.connection_index = -1 # A lock and notification mechanism to handle not having any # current connections self.connections_condition = threading.Condition() # A pipe to wake up the poll loop in case it needs to restart self.wake_read, self.wake_write = os.pipe() self.poll_thread = threading.Thread(name="Gearman client poll", target=self._doPollLoop) self.poll_thread.daemon = True self.poll_thread.start() self.connect_thread = threading.Thread(name="Gearman client connect", target=self._doConnectLoop) self.connect_thread.daemon = True self.connect_thread.start() def _doConnectLoop(self): # Outer run method of the reconnection thread while self.running: self.connections_condition.acquire() while self.running and not self.inactive_connections: self.log.debug("Waiting for change in available servers " "to reconnect") self.connections_condition.wait() self.connections_condition.release() self.log.debug("Checking if servers need to be reconnected") try: if self.running and not self._connectLoop(): # Nothing happened time.sleep(2) except Exception: self.log.exception("Exception in connect loop:") def _connectLoop(self): # Inner method of the reconnection loop, triggered by # a connection change success = False for conn in self.inactive_connections[:]: self.log.debug("Trying to reconnect %s" % conn) try: conn.reconnect() except ConnectionError: self.log.debug("Unable to connect to %s" % conn) continue except Exception: self.log.exception("Exception while connecting to %s" % conn) continue try: self._onConnect(conn) except Exception: self.log.exception("Exception while performing on-connect " "tasks for %s" % conn) continue self.connections_condition.acquire() self.inactive_connections.remove(conn) self.active_connections.append(conn) self.connections_condition.notifyAll() os.write(self.wake_write, b'1\n') self.connections_condition.release() try: self._onActiveConnection(conn) except Exception: self.log.exception("Exception while performing active conn " "tasks for %s" % conn) success = True return success def _onConnect(self, conn): # Called immediately after a successful (re-)connection pass def _onActiveConnection(self, conn): # Called immediately after a connection is activated pass def _lostConnection(self, conn): # Called as soon as a connection is detected as faulty. Remove # it and return ASAP and let the connection thread deal with it. self.log.debug("Marking %s as disconnected" % conn) self.connections_condition.acquire() jobs = conn.related_jobs.values() self.active_connections.remove(conn) self.inactive_connections.append(conn) self.connections_condition.notifyAll() self.connections_condition.release() for job in jobs: self.handleDisconnect(job) def _doPollLoop(self): # Outer run method of poll thread. 
while self.running: self.connections_condition.acquire() while self.running and not self.active_connections: self.log.debug("Waiting for change in available connections " "to poll") self.connections_condition.wait() self.connections_condition.release() try: self._pollLoop() except Exception: self.log.exception("Exception in poll loop:") def _pollLoop(self): # Inner method of poll loop self.log.debug("Preparing to poll") poll = select.poll() bitmask = (select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL) # Reverse mapping of fd -> connection conn_dict = {} for conn in self.active_connections: poll.register(conn.conn.fileno(), bitmask) conn_dict[conn.conn.fileno()] = conn # Register the wake pipe so that we can break if we need to # reconfigure connections poll.register(self.wake_read, bitmask) while self.running: self.log.debug("Polling %s connections" % len(self.active_connections)) ret = poll.poll() for fd, event in ret: if fd == self.wake_read: self.log.debug("Woken by pipe") while True: if os.read(self.wake_read, 1) == b'\n': break return conn = conn_dict[fd] if event & select.POLLIN: self.log.debug("Processing input on %s" % conn) p = conn.readPacket() if p: if isinstance(p, Packet): self.handlePacket(p) else: self.handleAdminRequest(p) else: self.log.debug("Received no data on %s" % conn) self._lostConnection(conn) return else: self.log.debug("Received error event on %s" % conn) self._lostConnection(conn) return def handlePacket(self, packet): """Handle a received packet. This method is called whenever a packet is received from any connection. It normally calls the handle method appropriate for the specific packet. :arg Packet packet: The :py:class:`Packet` that was received. """ self.log.debug("Received packet %s" % packet) if packet.ptype == constants.JOB_CREATED: self.handleJobCreated(packet) elif packet.ptype == constants.WORK_COMPLETE: self.handleWorkComplete(packet) elif packet.ptype == constants.WORK_FAIL: self.handleWorkFail(packet) elif packet.ptype == constants.WORK_EXCEPTION: self.handleWorkException(packet) elif packet.ptype == constants.WORK_DATA: self.handleWorkData(packet) elif packet.ptype == constants.WORK_WARNING: self.handleWorkWarning(packet) elif packet.ptype == constants.WORK_STATUS: self.handleWorkStatus(packet) elif packet.ptype == constants.STATUS_RES: self.handleStatusRes(packet) elif packet.ptype == constants.GET_STATUS: self.handleGetStatus(packet) elif packet.ptype == constants.JOB_ASSIGN_UNIQ: self.handleJobAssignUnique(packet) elif packet.ptype == constants.JOB_ASSIGN: self.handleJobAssign(packet) elif packet.ptype == constants.NO_JOB: self.handleNoJob(packet) elif packet.ptype == constants.NOOP: self.handleNoop(packet) elif packet.ptype == constants.SUBMIT_JOB: self.handleSubmitJob(packet) elif packet.ptype == constants.SUBMIT_JOB_BG: self.handleSubmitJobBg(packet) elif packet.ptype == constants.SUBMIT_JOB_HIGH: self.handleSubmitJobHigh(packet) elif packet.ptype == constants.SUBMIT_JOB_HIGH_BG: self.handleSubmitJobHighBg(packet) elif packet.ptype == constants.SUBMIT_JOB_LOW: self.handleSubmitJobLow(packet) elif packet.ptype == constants.SUBMIT_JOB_LOW_BG: self.handleSubmitJobLowBg(packet) elif packet.ptype == constants.SUBMIT_JOB_SCHED: self.handleSubmitJobSched(packet) elif packet.ptype == constants.SUBMIT_JOB_EPOCH: self.handleSubmitJobEpoch(packet) elif packet.ptype == constants.GRAB_JOB_UNIQ: self.handleGrabJobUniq(packet) elif packet.ptype == constants.GRAB_JOB: self.handleGrabJob(packet) elif packet.ptype == constants.PRE_SLEEP: 
self.handlePreSleep(packet) elif packet.ptype == constants.SET_CLIENT_ID: self.handleSetClientID(packet) elif packet.ptype == constants.CAN_DO: self.handleCanDo(packet) elif packet.ptype == constants.CAN_DO_TIMEOUT: self.handleCanDoTimeout(packet) elif packet.ptype == constants.CANT_DO: self.handleCantDo(packet) elif packet.ptype == constants.RESET_ABILITIES: self.handleResetAbilities(packet) elif packet.ptype == constants.ECHO_REQ: self.handleEchoReq(packet) elif packet.ptype == constants.ECHO_RES: self.handleEchoRes(packet) elif packet.ptype == constants.ERROR: self.handleError(packet) elif packet.ptype == constants.ALL_YOURS: self.handleAllYours(packet) elif packet.ptype == constants.OPTION_REQ: self.handleOptionReq(packet) elif packet.ptype == constants.OPTION_RES: self.handleOptionRes(packet) else: self.log.error("Received unknown packet: %s" % packet) def _defaultPacketHandler(self, packet): self.log.error("Received unhandled packet: %s" % packet) def handleJobCreated(self, packet): return self._defaultPacketHandler(packet) def handleWorkComplete(self, packet): return self._defaultPacketHandler(packet) def handleWorkFail(self, packet): return self._defaultPacketHandler(packet) def handleWorkException(self, packet): return self._defaultPacketHandler(packet) def handleWorkData(self, packet): return self._defaultPacketHandler(packet) def handleWorkWarning(self, packet): return self._defaultPacketHandler(packet) def handleWorkStatus(self, packet): return self._defaultPacketHandler(packet) def handleStatusRes(self, packet): return self._defaultPacketHandler(packet) def handleGetStatus(self, packet): return self._defaultPacketHandler(packet) def handleJobAssignUnique(self, packet): return self._defaultPacketHandler(packet) def handleJobAssign(self, packet): return self._defaultPacketHandler(packet) def handleNoJob(self, packet): return self._defaultPacketHandler(packet) def handleNoop(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJob(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobBg(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobHigh(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobHighBg(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobLow(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobLowBg(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobSched(self, packet): return self._defaultPacketHandler(packet) def handleSubmitJobEpoch(self, packet): return self._defaultPacketHandler(packet) def handleGrabJobUniq(self, packet): return self._defaultPacketHandler(packet) def handleGrabJob(self, packet): return self._defaultPacketHandler(packet) def handlePreSleep(self, packet): return self._defaultPacketHandler(packet) def handleSetClientID(self, packet): return self._defaultPacketHandler(packet) def handleCanDo(self, packet): return self._defaultPacketHandler(packet) def handleCanDoTimeout(self, packet): return self._defaultPacketHandler(packet) def handleCantDo(self, packet): return self._defaultPacketHandler(packet) def handleResetAbilities(self, packet): return self._defaultPacketHandler(packet) def handleEchoReq(self, packet): return self._defaultPacketHandler(packet) def handleEchoRes(self, packet): return self._defaultPacketHandler(packet) def handleError(self, packet): return self._defaultPacketHandler(packet) def handleAllYours(self, packet): return self._defaultPacketHandler(packet) def 
handleOptionReq(self, packet): return self._defaultPacketHandler(packet) def handleOptionRes(self, packet): return self._defaultPacketHandler(packet) def handleAdminRequest(self, request): """Handle an administrative command response from Gearman. This method is called whenever a response to a previously issued administrative command is received from one of this client's connections. It normally releases the wait lock on the initiating AdminRequest object. :arg AdminRequest request: The :py:class:`AdminRequest` that initiated the received response. """ self.log.debug("Received admin data %s" % request) request.setComplete() def shutdown(self): """Close all connections and stop all running threads. The object may no longer be used after shutdown is called. """ self.log.debug("Beginning shutdown") self._shutdown() self.log.debug("Beginning cleanup") self._cleanup() self.log.debug("Finished shutdown") def _shutdown(self): # The first part of the shutdown process where all threads # are told to exit. self.running = False self.connections_condition.acquire() self.connections_condition.notifyAll() os.write(self.wake_write, b'1\n') self.connections_condition.release() def _cleanup(self): # The second part of the shutdown process where we wait for all # threads to exit and then clean up. self.poll_thread.join() self.connect_thread.join() for connection in self.active_connections: connection.disconnect() self.active_connections = [] self.inactive_connections = [] class BaseClient(BaseClientServer): def __init__(self): super(BaseClient, self).__init__() # A lock to use when sending packets that set the state across # all known connections. Note that it doesn't necessarily need # to be used for all broadcasts, only those that affect multi- # connection state, such as setting options or functions. self.broadcast_lock = threading.RLock() def addServer(self, host, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None): """Add a server to the client's connection pool. Any number of Gearman servers may be added to a client. The client will connect to all of them and send jobs to them in a round-robin fashion. When servers are disconnected, the client will automatically remove them from the pool, continuously try to reconnect to them, and return them to the pool when reconnected. New servers may be added at any time. This is a non-blocking call that will return regardless of whether the initial connection succeeded. If you need to ensure that a connection is ready before proceeding, see :py:meth:`waitForServer`. When using SSL connections, all SSL files must be specified. :arg str host: The hostname or IP address of the server. :arg int port: The port on which the gearman server is listening. :arg str ssl_key: Path to the SSL private key. :arg str ssl_cert: Path to the SSL certificate. :arg str ssl_ca: Path to the CA certificate. :raises ConfigurationError: If the host/port combination has already been added to the client. """ self.log.debug("Adding server %s port %s" % (host, port)) self.connections_condition.acquire() try: for conn in self.active_connections + self.inactive_connections: if conn.host == host and conn.port == port: raise ConfigurationError("Host/port already specified") conn = Connection(host, port, ssl_key, ssl_cert, ssl_ca) self.inactive_connections.append(conn) self.connections_condition.notifyAll() finally: self.connections_condition.release() def waitForServer(self): """Wait for at least one server to be connected. Block until at least one gearman server is connected. 
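# Illustrative usage sketch, not part of this file: the connection-pool
# pattern described by addServer() and waitForServer() above.  The server
# names and the second port are example values; servers may be added at any
# time.
import gear

client = gear.Client()
client.addServer('gearman1.example.com')        # default port 4730
client.addServer('gearman2.example.com', 4731)  # a second, example server
client.waitForServer()  # block until at least one server is connected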
""" connected = False while self.running: self.connections_condition.acquire() while self.running and not self.active_connections: self.log.debug("Waiting for at least one active connection") self.connections_condition.wait() if self.active_connections: self.log.debug("Active connection found") connected = True self.connections_condition.release() if connected: return def getConnection(self): """Return a connected server. Finds the next scheduled connected server in the round-robin rotation and returns it. It is not usually necessary to use this method external to the library, as more consumer-oriented methods such as submitJob already use it internally, but is available nonetheless if necessary. :returns: The next scheduled :py:class:`Connection` object. :rtype: :py:class:`Connection` :raises NoConnectedServersError: If there are not currently connected servers. """ conn = None try: self.connections_condition.acquire() if not self.active_connections: raise NoConnectedServersError("No connected Gearman servers") self.connection_index += 1 if self.connection_index >= len(self.active_connections): self.connection_index = 0 conn = self.active_connections[self.connection_index] finally: self.connections_condition.release() return conn def broadcast(self, packet): """Send a packet to all currently connected servers. :arg Packet packet: The :py:class:`Packet` to send. """ connections = self.active_connections[:] for connection in connections: try: self.sendPacket(packet, connection) except Exception: # Error handling is all done by sendPacket pass def sendPacket(self, packet, connection): """Send a packet to a single connection, removing it from the list of active connections if that fails. :arg Packet packet: The :py:class:`Packet` to send. :arg Connection connection: The :py:class:`Connection` on which to send the packet. """ try: connection.sendPacket(packet) return except Exception: self.log.exception("Exception while sending packet %s to %s" % (packet, connection)) # If we can't send the packet, discard the connection self._lostConnection(connection) raise def handleEchoRes(self, packet): """Handle an ECHO_RES packet. Causes the blocking :py:meth:`Connection.echo` invocation to return. :arg Packet packet: The :py:class:`Packet` that was received. :returns: None """ packet.connection.handleEchoRes(packet.getArgument(0, True)) def handleError(self, packet): """Handle an ERROR packet. Logs the error. :arg Packet packet: The :py:class:`Packet` that was received. :returns: None """ self.log.error("Received ERROR packet: %s: %s" % (packet.getArgument(0), packet.getArgument(1))) try: task = packet.connection.pending_tasks.pop(0) task.setComplete() except Exception: self.log.exception("Exception while handling error packet:") self._lostConnection(packet.connection) class Client(BaseClient): """A Gearman client. You may wish to subclass this class in order to override the default event handlers to react to Gearman events. Be sure to call the superclass event handlers so that they may perform job-related housekeeping. 
""" log = logging.getLogger("gear.Client") def __init__(self): super(Client, self).__init__() self.options = set() def __repr__(self): return '' % id(self) def _onConnect(self, conn): # Called immediately after a successful (re-)connection self.broadcast_lock.acquire() try: super(Client, self)._onConnect(conn) for name in self.options: self._setOptionConnection(name, conn) finally: self.broadcast_lock.release() def _setOptionConnection(self, name, conn): # Set an option on a connection packet = Packet(constants.REQ, constants.OPTION_REQ, name) task = OptionReqTask() try: conn.pending_tasks.append(task) self.sendPacket(packet, conn) except Exception: # Error handling is all done by sendPacket task = None return task def setOption(self, name, timeout=30): """Set an option for all connections. :arg str name: The option name to set. :arg int timeout: How long to wait (in seconds) for a response from the server before giving up (default: 30 seconds). :returns: True if the option was set on all connections, otherwise False :rtype: bool """ tasks = {} name = convert_to_bytes(name) self.broadcast_lock.acquire() try: self.options.add(name) connections = self.active_connections[:] for connection in connections: task = self._setOptionConnection(name, connection) if task: tasks[task] = connection finally: self.broadcast_lock.release() success = True for task in tasks.keys(): complete = task.wait(timeout) conn = tasks[task] if not complete: self.log.error("Connection %s timed out waiting for a " "response to an option request: %s" % (conn, name)) self._lostConnection(conn) continue if name not in conn.options: success = False return success def submitJob(self, job, background=False, precedence=PRECEDENCE_NORMAL, timeout=30): """Submit a job to a Gearman server. Submits the provided job to the next server in this client's round-robin connection pool. If the job is a foreground job, updates will be made to the supplied :py:class:`Job` object as they are received. :arg Job job: The :py:class:`Job` to submit. :arg bool background: Whether the job should be backgrounded. :arg int precedence: Whether the job should have normal, low, or high precedence. One of :py:data:`PRECEDENCE_NORMAL`, :py:data:`PRECEDENCE_LOW`, or :py:data:`PRECEDENCE_HIGH` :arg int timeout: How long to wait (in seconds) for a response from the server before giving up (default: 30 seconds). :raises ConfigurationError: If an invalid precendence value is supplied. 
""" if job.unique is None: unique = b'' else: unique = job.unique data = b'\x00'.join((job.name, unique, job.arguments)) if background: if precedence == PRECEDENCE_NORMAL: cmd = constants.SUBMIT_JOB_BG elif precedence == PRECEDENCE_LOW: cmd = constants.SUBMIT_JOB_LOW_BG elif precedence == PRECEDENCE_HIGH: cmd = constants.SUBMIT_JOB_HIGH_BG else: raise ConfigurationError("Invalid precedence value") else: if precedence == PRECEDENCE_NORMAL: cmd = constants.SUBMIT_JOB elif precedence == PRECEDENCE_LOW: cmd = constants.SUBMIT_JOB_LOW elif precedence == PRECEDENCE_HIGH: cmd = constants.SUBMIT_JOB_HIGH else: raise ConfigurationError("Invalid precedence value") packet = Packet(constants.REQ, cmd, data) while True: conn = self.getConnection() task = SubmitJobTask(job) conn.pending_tasks.append(task) try: self.sendPacket(packet, conn) except Exception: # Error handling is all done by sendPacket continue complete = task.wait(timeout) if not complete: self.log.error("Connection %s timed out waiting for a " "response to a submit job request: %s" % (conn, job)) self._lostConnection(conn) continue if not job.handle: self.log.error("Connection %s sent an error in " "response to a submit job request: %s" % (conn, job)) continue job.connection = conn return raise GearmanError("Unable to submit job to any connected servers") def handleJobCreated(self, packet): """Handle a JOB_CREATED packet. Updates the appropriate :py:class:`Job` with the newly returned job handle. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ task = packet.connection.pending_tasks.pop(0) if not isinstance(task, SubmitJobTask): msg = ("Unexpected response received to submit job " "request: %s" % packet) self.log.error(msg) self._lostConnection(packet.connection) raise GearmanError(msg) job = task.job job.handle = packet.data packet.connection.related_jobs[job.handle] = job task.setComplete() self.log.debug("Job created; handle: %s" % job.handle) return job def handleWorkComplete(self, packet): """Handle a WORK_COMPLETE packet. Updates the referenced :py:class:`Job` with the returned data and removes it from the list of jobs associated with the connection. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() data = packet.getArgument(1, True) if data: job.data.append(data) job.complete = True job.failure = False del packet.connection.related_jobs[job.handle] self.log.debug("Job complete; handle: %s data: %s" % (job.handle, job.data)) return job def handleWorkFail(self, packet): """Handle a WORK_FAIL packet. Updates the referenced :py:class:`Job` with the returned data and removes it from the list of jobs associated with the connection. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() job.complete = True job.failure = True del packet.connection.related_jobs[job.handle] self.log.debug("Job failed; handle: %s" % job.handle) return job def handleWorkException(self, packet): """Handle a WORK_Exception packet. Updates the referenced :py:class:`Job` with the returned data and removes it from the list of jobs associated with the connection. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. 
:rtype: :py:class:`Job` """ job = packet.getJob() job.exception = packet.getArgument(1, True) job.complete = True job.failure = True del packet.connection.related_jobs[job.handle] self.log.debug("Job exception; handle: %s data: %s" % (job.handle, job.exception)) return job def handleWorkData(self, packet): """Handle a WORK_DATA packet. Updates the referenced :py:class:`Job` with the returned data. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() data = packet.getArgument(1, True) if data: job.data.append(data) self.log.debug("Job data; handle: %s data: %s" % (job.handle, job.data)) return job def handleWorkWarning(self, packet): """Handle a WORK_WARNING packet. Updates the referenced :py:class:`Job` with the returned data. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() data = packet.getArgument(1, True) if data: job.data.append(data) job.warning = True self.log.debug("Job warning; handle: %s data: %s" % (job.handle, job.data)) return job def handleWorkStatus(self, packet): """Handle a WORK_STATUS packet. Updates the referenced :py:class:`Job` with the returned data. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() job.numerator = packet.getArgument(1) job.denominator = packet.getArgument(2) try: job.fraction_complete = (float(job.numerator) / float(job.denominator)) except Exception: job.fraction_complete = None self.log.debug("Job status; handle: %s complete: %s/%s" % (job.handle, job.numerator, job.denominator)) return job def handleStatusRes(self, packet): """Handle a STATUS_RES packet. Updates the referenced :py:class:`Job` with the returned data. :arg Packet packet: The :py:class:`Packet` that was received. :returns: The :py:class:`Job` object associated with the job request. :rtype: :py:class:`Job` """ job = packet.getJob() job.known = (packet.getArgument(1) == 1) job.running = (packet.getArgument(2) == 1) job.numerator = packet.getArgument(3) job.denominator = packet.getArgument(4) try: job.fraction_complete = (float(job.numerator) / float(job.denominator)) except Exception: job.fraction_complete = None return job def handleOptionRes(self, packet): """Handle an OPTION_RES packet. Updates the set of options for the connection. :arg Packet packet: The :py:class:`Packet` that was received. :returns: None. """ task = packet.connection.pending_tasks.pop(0) if not isinstance(task, OptionReqTask): msg = ("Unexpected response received to option " "request: %s" % packet) self.log.error(msg) self._lostConnection(packet.connection) raise GearmanError(msg) packet.connection.handleOptionRes(packet.getArgument(0)) task.setComplete() def handleDisconnect(self, job): """Handle a Gearman server disconnection. If the Gearman server is disconnected, this will be called for any jobs currently associated with the server. :arg Job packet: The :py:class:`Job` that was running when the server disconnected. """ return job class FunctionRecord(object): """Represents a function that should be registered with Gearman. This class only directly needs to be instatiated for use with :py:meth:`Worker.setFunctions`. If a timeout value is supplied, the function will be registered with CAN_DO_TIMEOUT. 
:arg str name: The name of the function to register. :arg numeric timeout: The timeout value (optional). """ def __init__(self, name, timeout=None): self.name = name self.timeout = timeout def __repr__(self): return '' % ( id(self), self.name, self.timeout) class Worker(BaseClient): """A Gearman worker. :arg str worker_id: The worker ID to provide to Gearman (will appear in administrative command output). """ log = logging.getLogger("gear.Worker") def __init__(self, worker_id): self.worker_id = convert_to_bytes(worker_id) self.functions = {} self.job_lock = threading.Lock() self.waiting_for_jobs = 0 self.job_queue = queue.Queue() super(Worker, self).__init__() def __repr__(self): return '' % id(self) def registerFunction(self, name, timeout=None): """Register a function with Gearman. If a timeout value is supplied, the function will be registered with CAN_DO_TIMEOUT. :arg str name: The name of the function to register. :arg numeric timeout: The timeout value (optional). """ name = convert_to_bytes(name) self.functions[name] = FunctionRecord(name, timeout) if timeout: self._sendCanDoTimeout(name, timeout) else: self._sendCanDo(name) def unRegisterFunction(self, name): """Remove a function from Gearman's registry. :arg str name: The name of the function to remove. """ name = convert_to_bytes(name) del self.functions[name] self._sendCantDo(name) def setFunctions(self, functions): """Replace the set of functions registered with Gearman. Accepts a list of :py:class:`FunctionRecord` objects which represents the complete set of functions that should be registered with Gearman. Any existing functions will be unregistered and these registered in their place. If the empty list is supplied, then the Gearman registered function set will be cleared. :arg list functions: A list of :py:class:`FunctionRecord` objects. 
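# Illustrative usage sketch, not part of this file: replacing a worker's
# registered function set wholesale.  Note that setFunctions() passes names
# and timeouts through as byte strings, so they are given as bytes here; the
# names and timeout value are examples.
import gear

worker = gear.Worker('example-worker')
worker.addServer('gearman.example.com')
worker.waitForServer()
worker.setFunctions([
    gear.FunctionRecord(b'build'),
    gear.FunctionRecord(b'long-build', timeout=b'300'),  # uses CAN_DO_TIMEOUT
])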
""" self._sendResetAbilities() self.functions = {} for f in functions: if not isinstance(f, FunctionRecord): raise InvalidDataError( "An iterable of FunctionRecords is required.") self.functions[f.name] = f for f in self.functions.values(): if f.timeout: self._sendCanDoTimeout(f.name, f.timeout) else: self._sendCanDo(f.name) def _sendCanDo(self, name): self.broadcast_lock.acquire() try: p = Packet(constants.REQ, constants.CAN_DO, name) self.broadcast(p) finally: self.broadcast_lock.release() def _sendCanDoTimeout(self, name, timeout): self.broadcast_lock.acquire() try: data = name + b'\x00' + timeout p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data) self.broadcast(p) finally: self.broadcast_lock.release() def _sendCantDo(self, name): self.broadcast_lock.acquire() try: p = Packet(constants.REQ, constants.CANT_DO, name) self.broadcast(p) finally: self.broadcast_lock.release() def _sendResetAbilities(self): self.broadcast_lock.acquire() try: p = Packet(constants.REQ, constants.RESET_ABILITIES, b'') self.broadcast(p) finally: self.broadcast_lock.release() def _sendPreSleep(self, connection): p = Packet(constants.REQ, constants.PRE_SLEEP, b'') self.sendPacket(p, connection) def _sendGrabJobUniq(self, connection=None): p = Packet(constants.REQ, constants.GRAB_JOB_UNIQ, b'') if connection: self.sendPacket(p, connection) else: self.broadcast(p) def _onConnect(self, conn): self.broadcast_lock.acquire() try: # Called immediately after a successful (re-)connection p = Packet(constants.REQ, constants.SET_CLIENT_ID, self.worker_id) conn.sendPacket(p) super(Worker, self)._onConnect(conn) for f in self.functions.values(): if f.timeout: data = f.name + b'\x00' + f.timeout p = Packet(constants.REQ, constants.CAN_DO_TIMEOUT, data) else: p = Packet(constants.REQ, constants.CAN_DO, f.name) conn.sendPacket(p) conn.changeState("IDLE") finally: self.broadcast_lock.release() # Any exceptions will be handled by the calling function, and the # connection will not be put into the pool. def _onActiveConnection(self, conn): self.job_lock.acquire() try: if self.waiting_for_jobs > 0: self._updateStateMachines() finally: self.job_lock.release() def _updateStateMachines(self): connections = self.active_connections[:] for connection in connections: if (connection.state == "IDLE" and self.waiting_for_jobs > 0): self._sendGrabJobUniq(connection) connection.changeState("GRAB_WAIT") if (connection.state != "IDLE" and self.waiting_for_jobs < 1): connection.changeState("IDLE") def getJob(self): """Get a job from Gearman. Blocks until a job is received. This method is re-entrant, so it is safe to call this method on a single worker from multiple threads. In that case, one of them at random will receive the job assignment. :returns: The :py:class:`WorkerJob` assigned. :rtype: :py:class:`WorkerJob`. :raises InterruptedError: If interrupted (by :py:meth:`stopWaitingForJobs`) before a job is received. """ self.job_lock.acquire() try: self.waiting_for_jobs += 1 self.log.debug("Get job; number of threads waiting for jobs: %s" % self.waiting_for_jobs) try: job = self.job_queue.get(False) except queue.Empty: job = None if not job: self._updateStateMachines() finally: self.job_lock.release() if not job: job = self.job_queue.get() self.log.debug("Received job: %s" % job) if job is None: raise InterruptedError() return job def stopWaitingForJobs(self): """Interrupts all running :py:meth:`getJob` calls, which will raise an exception. 
""" self.job_lock.acquire() while True: connections = self.active_connections[:] now = time.time() ok = True for connection in connections: if connection.state == "GRAB_WAIT": # Replies to GRAB_JOB should be fast, give up if we've # been waiting for more than 5 seconds. if now - connection.state_time > 5: self._lostConnection(connection) else: ok = False if ok: break else: self.job_lock.release() time.sleep(0.1) self.job_lock.acquire() while self.waiting_for_jobs > 0: self.waiting_for_jobs -= 1 self.job_queue.put(None) self._updateStateMachines() self.job_lock.release() def _shutdown(self): super(Worker, self)._shutdown() self.stopWaitingForJobs() def handleNoop(self, packet): """Handle a NOOP packet. Sends a GRAB_JOB_UNIQ packet on the same connection. GRAB_JOB_UNIQ will return jobs regardless of whether they have been specified with a unique identifier when submitted. If they were not, then :py:attr:`WorkerJob.unique` attribute will be None. :arg Packet packet: The :py:class:`Packet` that was received. """ self.job_lock.acquire() try: if packet.connection.state == "SLEEP": self.log.debug("Sending GRAB_JOB_UNIQ") self._sendGrabJobUniq(packet.connection) packet.connection.changeState("GRAB_WAIT") else: self.log.debug("Received unexpecetd NOOP packet on %s" % packet.connection) finally: self.job_lock.release() def handleNoJob(self, packet): """Handle a NO_JOB packet. Sends a PRE_SLEEP packet on the same connection. :arg Packet packet: The :py:class:`Packet` that was received. """ self.job_lock.acquire() try: if packet.connection.state == "GRAB_WAIT": self.log.debug("Sending PRE_SLEEP") self._sendPreSleep(packet.connection) packet.connection.changeState("SLEEP") else: self.log.debug("Received unexpected NO_JOB packet on %s" % packet.connection) finally: self.job_lock.release() def handleJobAssign(self, packet): """Handle a JOB_ASSIGN packet. Adds a WorkerJob to the internal queue to be picked up by any threads waiting in :py:meth:`getJob`. :arg Packet packet: The :py:class:`Packet` that was received. """ handle = packet.getArgument(0) name = packet.getArgument(1) arguments = packet.getArgument(2, True) return self._handleJobAssignment(packet, handle, name, arguments, None) def handleJobAssignUnique(self, packet): """Handle a JOB_ASSIGN_UNIQ packet. Adds a WorkerJob to the internal queue to be picked up by any threads waiting in :py:meth:`getJob`. :arg Packet packet: The :py:class:`Packet` that was received. 
""" handle = packet.getArgument(0) name = packet.getArgument(1) unique = packet.getArgument(2) if unique == b'': unique = None arguments = packet.getArgument(3, True) return self._handleJobAssignment(packet, handle, name, arguments, unique) def _handleJobAssignment(self, packet, handle, name, arguments, unique): job = WorkerJob(handle, name, arguments, unique) job.connection = packet.connection self.job_lock.acquire() try: packet.connection.changeState("IDLE") self.waiting_for_jobs -= 1 self.log.debug("Job assigned; number of threads waiting for " "jobs: %s" % self.waiting_for_jobs) self.job_queue.put(job) self._updateStateMachines() finally: self.job_lock.release() class BaseJob(object): log = logging.getLogger("gear.Job") def __init__(self, name, arguments, unique=None, handle=None): self.name = convert_to_bytes(name) if (not isinstance(arguments, bytes) and not isinstance(arguments, bytearray)): raise TypeError("arguments must be of type bytes or bytearray") self.arguments = arguments self.unique = convert_to_bytes(unique) self.handle = handle self.connection = None def __repr__(self): return '' % ( id(self), self.handle, self.name, self.unique) class Job(BaseJob): """A job to run or being run by Gearman. :arg str name: The name of the job. :arg bytes arguments: The opaque data blob to be passed to the worker as arguments. :arg str unique: A byte string to uniquely identify the job to Gearman (optional). The following instance attributes are available: **name** (str) The name of the job. **arguments** (bytes) The opaque data blob passed to the worker as arguments. **unique** (str or None) The unique ID of the job (if supplied). **handle** (bytes or None) The Gearman job handle. None if no job handle has been received yet. **data** (list of byte-arrays) The result data returned from Gearman. Each packet appends an element to the list. Depending on the nature of the data, the elements may need to be concatenated before use. **exception** (bytes or None) Exception information returned from Gearman. None if no exception has been received. **warning** (bool) Whether the worker has reported a warning. **complete** (bool) Whether the job is complete. **failure** (bool) Whether the job has failed. Only set when complete is True. **numerator** (bytes or None) The numerator of the completion ratio reported by the worker. Only set when a status update is sent by the worker. **denominator** (bytes or None) The denominator of the completion ratio reported by the worker. Only set when a status update is sent by the worker. **fraction_complete** (float or None) The fractional complete ratio reported by the worker. Only set when a status update is sent by the worker. **known** (bool or None) Whether the job is known to Gearman. Only set by handleStatusRes() in response to a getStatus() query. **running** (bool or None) Whether the job is running. Only set by handleStatusRes() in response to a getStatus() query. **connection** (:py:class:`Connection` or None) The connection associated with the job. Only set after the job has been submitted to a Gearman server. """ log = logging.getLogger("gear.Job") def __init__(self, name, arguments, unique=None): super(Job, self).__init__(name, arguments, unique) self.data = [] self.exception = None self.warning = False self.complete = False self.failure = False self.numerator = None self.denominator = None self.fraction_complete = None self.known = None self.running = None class WorkerJob(BaseJob): """A job that Gearman has assigned to a Worker. 
Not intended to be instantiated directly, but rather returned by :py:meth:`Worker.getJob`. :arg str handle: The job handle assigned by gearman. :arg str name: The name of the job. :arg bytes arguments: The opaque data blob passed to the worker as arguments. :arg str unique: A byte string to uniquely identify the job to Gearman (optional). The following instance attributes are available: **name** (str) The name of the job. **arguments** (bytes) The opaque data blob passed to the worker as arguments. **unique** (str or None) The unique ID of the job (if supplied). **handle** (bytes) The Gearman job handle. **connection** (:py:class:`Connection` or None) The connection associated with the job. Only set after the job has been submitted to a Gearman server. """ log = logging.getLogger("gear.WorkerJob") def __init__(self, handle, name, arguments, unique=None): super(WorkerJob, self).__init__(name, arguments, unique, handle) def sendWorkData(self, data=b''): """Send a WORK_DATA packet to the client. :arg bytes data: The data to be sent to the client (optional). """ data = self.handle + b'\x00' + data p = Packet(constants.REQ, constants.WORK_DATA, data) self.connection.sendPacket(p) def sendWorkWarning(self, data=b''): """Send a WORK_WARNING packet to the client. :arg bytes data: The data to be sent to the client (optional). """ data = self.handle + b'\x00' + data p = Packet(constants.REQ, constants.WORK_WARNING, data) self.connection.sendPacket(p) def sendWorkStatus(self, numerator, denominator): """Send a WORK_STATUS packet to the client. Sends a numerator and denominator that together represent the fraction complete of the job. :arg numeric numerator: The numerator of the fraction complete. :arg numeric denominator: The denominator of the fraction complete. """ data = (self.handle + b'\x00' + str(numerator).encode('utf8') + b'\x00' + str(denominator).encode('utf8')) p = Packet(constants.REQ, constants.WORK_STATUS, data) self.connection.sendPacket(p) def sendWorkComplete(self, data=b''): """Send a WORK_COMPLETE packet to the client. :arg bytes data: The data to be sent to the client (optional). """ data = self.handle + b'\x00' + data p = Packet(constants.REQ, constants.WORK_COMPLETE, data) self.connection.sendPacket(p) def sendWorkFail(self): "Send a WORK_FAIL packet to the client." p = Packet(constants.REQ, constants.WORK_FAIL, self.handle) self.connection.sendPacket(p) def sendWorkException(self, data=b''): """Send a WORK_EXCEPTION packet to the client. :arg bytes data: The exception data to be sent to the client (optional). """ data = self.handle + b'\x00' + data p = Packet(constants.REQ, constants.WORK_EXCEPTION, data) self.connection.sendPacket(p) # Below are classes for use in the server implementation: class ServerJob(Job): """A job record for use in a server. :arg str name: The name of the job. :arg bytes arguments: The opaque data blob to be passed to the worker as arguments. :arg str unique: A byte string to uniquely identify the job to Gearman (optional). The following instance attributes are available: **name** (str) The name of the job. **arguments** (bytes) The opaque data blob passed to the worker as arguments. **unique** (str or None) The unique ID of the job (if supplied). **handle** (bytes or None) The Gearman job handle. None if no job handle has been received yet. **data** (list of byte-arrays) The result data returned from Gearman. Each packet appends an element to the list. Depending on the nature of the data, the elements may need to be concatenated before use. 
**exception** (bytes or None) Exception information returned from Gearman. None if no exception has been received. **warning** (bool) Whether the worker has reported a warning. **complete** (bool) Whether the job is complete. **failure** (bool) Whether the job has failed. Only set when complete is True. **numerator** (bytes or None) The numerator of the completion ratio reported by the worker. Only set when a status update is sent by the worker. **denominator** (bytes or None) The denominator of the completion ratio reported by the worker. Only set when a status update is sent by the worker. **fraction_complete** (float or None) The fractional complete ratio reported by the worker. Only set when a status update is sent by the worker. **known** (bool or None) Whether the job is known to Gearman. Only set by handleStatusRes() in response to a getStatus() query. **running** (bool or None) Whether the job is running. Only set by handleStatusRes() in response to a getStatus() query. **client_connection** (:py:class:`Connection`) The client connection associated with the job. **worker_connection** (:py:class:`Connection` or None) The worker connection associated with the job. Only set after the job has been assigned to a worker. """ log = logging.getLogger("gear.ServerJob") def __init__(self, handle, name, arguments, client_connection, unique=None): super(ServerJob, self).__init__(name, arguments, unique) self.handle = handle self.client_connection = client_connection self.worker_connection = None del self.connection class ServerAdminRequest(AdminRequest): """An administrative request sent to a server.""" def __init__(self, connection): super(ServerAdminRequest, self).__init__() self.connection = connection def isComplete(self, data): if data[-1:] == b'\n': self.command = data.strip() return True return False class ServerConnection(Connection): """A Connection to a Gearman Client.""" def __init__(self, addr, conn): self.host = addr[0] self.port = addr[1] self.conn = conn self.client_id = None self.functions = set() self.related_jobs = {} self.changeState("INIT") def _getAdminRequest(self): return ServerAdminRequest(self) def __repr__(self): if self.client_id: name = self.client_id else: name = '0x%x' % id(self) return '<gear.ServerConnection name: %s host: %s port: %s>' % ( name, self.host, self.port) class Server(BaseClientServer): """A simple gearman server implementation for testing (not for production use). :arg int port: The TCP port on which to listen. 
""" def __init__(self, port=4730, ssl_key=None, ssl_cert=None, ssl_ca=None): self.port = port self.ssl_key = ssl_key self.ssl_cert = ssl_cert self.ssl_ca = ssl_ca self.high_queue = [] self.normal_queue = [] self.low_queue = [] self.jobs = {} self.functions = set() self.max_handle = 0 self.connect_wake_read, self.connect_wake_write = os.pipe() for res in socket.getaddrinfo(None, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: self.socket = socket.socket(af, socktype, proto) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) except socket.error: self.socket = None continue try: self.socket.bind(sa) self.socket.listen(1) except socket.error: self.socket.close() self.socket = None continue break if self.socket is None: raise Exception("Could not open socket") if port == 0: self.port = self.socket.getsockname()[1] super(Server, self).__init__() def _doConnectLoop(self): while self.running: try: self.connectLoop() except Exception: self.log.exception("Exception in connect loop:") def connectLoop(self): poll = select.poll() bitmask = (select.POLLIN | select.POLLERR | select.POLLHUP | select.POLLNVAL) # Register the wake pipe so that we can break if we need to # shutdown. poll.register(self.connect_wake_read, bitmask) poll.register(self.socket.fileno(), bitmask) while self.running: ret = poll.poll() for fd, event in ret: if fd == self.connect_wake_read: self.log.debug("Accept woken by pipe") while True: if os.read(self.connect_wake_read, 1) == b'\n': break return if event & select.POLLIN: self.log.debug("Accepting new connection") c, addr = self.socket.accept() self.log.debug("Accepted new connection") if all([self.ssl_key, self.ssl_cert, self.ssl_ca]): c = ssl.wrap_socket(c, server_side=True, keyfile=self.ssl_key, certfile=self.ssl_cert, ca_certs=self.ssl_ca, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1) conn = ServerConnection(addr, c) self.connections_condition.acquire() self.active_connections.append(conn) self.connections_condition.notifyAll() os.write(self.wake_write, b'1\n') self.connections_condition.release() def _shutdown(self): super(Server, self)._shutdown() os.write(self.connect_wake_write, b'1\n') def _cleanup(self): super(Server, self)._cleanup() self.socket.close() def _lostConnection(self, conn): # Called as soon as a connection is detected as faulty. self.log.debug("Marking %s as disconnected" % conn) self.connections_condition.acquire() jobs = conn.related_jobs.values() self.active_connections.remove(conn) self.connections_condition.notifyAll() self.connections_condition.release() for job in jobs: if job.worker_connection == conn: # the worker disconnected, alert the client try: p = Packet(constants.REQ, constants.WORK_FAIL, job.handle) job.client_connection.sendPacket(p) except Exception: self.log.exception("Sending WORK_FAIL to client after " "worker disconnect failed:") del job.client_connection.related_jobs[job.handle] if job.worker_connection: del job.worker_connection.related_jobs[job.handle] del self.jobs[job.handle] def getQueue(self): """Returns a copy of all internal queues in a flattened form. :returns: The Gearman queue. :rtype: list of :py:class:`WorkerJob`. 
""" ret = [] for queue in [self.high_queue, self.normal_queue, self.low_queue]: ret += queue return ret def handleAdminRequest(self, request): if request.command.startswith(b'cancel job'): self.handleCancelJob(request) elif request.command.startswith(b'status'): self.handleStatus(request) elif request.command.startswith(b'workers'): self.handleWorkers(request) def handleCancelJob(self, request): words = request.command.split() handle = words[2] if handle in self.jobs: for queue in [self.high_queue, self.normal_queue, self.low_queue]: for job in queue: if handle == job.handle: queue.remove(job) del self.jobs[handle] request.connection.conn.send(b'OK\n') return request.connection.conn.send(b'ERR UNKNOWN_JOB\n') def handleStatus(self, request): functions = {} for function in self.functions: # Total, running, workers functions[function] = [0, 0, 0] for job in self.jobs.values(): functions[job.name][0] += 1 if job.running: functions[job.name][1] += 1 for connection in self.active_connections: for function in connection.functions: functions[function][2] += 1 for name, values in functions.items(): request.connection.conn.send(("%s\t%s\t%s\t%s\n" % (name, values[0], values[1], values[2])).encode('utf8')) request.connection.conn.send(b'.\n') def handleWorkers(self, request): for connection in self.active_connections: fd = connection.conn.fileno() ip = connection.host client_id = connection.client_id or '-' functions = ' '.join(connection.functions) request.connection.conn.send(("%s %s %s : %s\n" % (fd, ip, client_id, functions)) .encode('utf8')) request.connection.conn.send(b'.\n') def wakeConnections(self): p = Packet(constants.RES, constants.NOOP, b'') for connection in self.active_connections: if connection.state == 'SLEEP': connection.changeState("AWAKE") connection.sendPacket(p) def _handleSubmitJob(self, packet, precedence): name = packet.getArgument(0) unique = packet.getArgument(1) if not unique: unique = None arguments = packet.getArgument(2, True) self.max_handle += 1 handle = ('H:%s:%s' % (packet.connection.host, self.max_handle)).encode('utf8') job = ServerJob(handle, name, arguments, packet.connection, unique) p = Packet(constants.RES, constants.JOB_CREATED, handle) packet.connection.sendPacket(p) self.jobs[handle] = job packet.connection.related_jobs[handle] = job if precedence == PRECEDENCE_HIGH: self.high_queue.append(job) elif precedence == PRECEDENCE_NORMAL: self.normal_queue.append(job) elif precedence == PRECEDENCE_LOW: self.low_queue.append(job) self.wakeConnections() def handleSubmitJob(self, packet): return self._handleSubmitJob(packet, PRECEDENCE_NORMAL) def handleSubmitJobHigh(self, packet): return self._handleSubmitJob(packet, PRECEDENCE_HIGH) def handleSubmitJobLow(self, packet): return self._handleSubmitJob(packet, PRECEDENCE_LOW) def getJobForConnection(self, connection, peek=False): for queue in [self.high_queue, self.normal_queue, self.low_queue]: for job in queue: if job.name in connection.functions: if not peek: queue.remove(job) connection.related_jobs[job.handle] = job job.worker_connection = connection job.running = True return job return None def handleGrabJobUniq(self, packet): job = self.getJobForConnection(packet.connection) if job: self.sendJobAssignUniq(packet.connection, job) else: self.sendNoJob(packet.connection) def sendJobAssignUniq(self, connection, job): unique = job.unique if not unique: unique = b'' data = b'\x00'.join((job.handle, job.name, unique, job.arguments)) p = Packet(constants.RES, constants.JOB_ASSIGN_UNIQ, data) connection.sendPacket(p) 
def sendNoJob(self, connection): p = Packet(constants.RES, constants.NO_JOB, b'') connection.sendPacket(p) def handlePreSleep(self, packet): packet.connection.changeState("SLEEP") if self.getJobForConnection(packet.connection, peek=True): self.wakeConnections() def handleWorkComplete(self, packet): self.handlePassthrough(packet, True) def handleWorkFail(self, packet): self.handlePassthrough(packet, True) def handleWorkException(self, packet): self.handlePassthrough(packet, True) def handleWorkData(self, packet): self.handlePassthrough(packet) def handleWorkWarning(self, packet): self.handlePassthrough(packet) def handleWorkStatus(self, packet): handle = packet.getArgument(0) job = self.jobs.get(handle) if not job: raise UnknownJobError() job.numerator = packet.getArgument(1) job.denominator = packet.getArgument(2) self.handlePassthrough(packet) def handlePassthrough(self, packet, finished=False): handle = packet.getArgument(0) job = self.jobs.get(handle) if not job: raise UnknownJobError() packet.code = constants.RES job.client_connection.sendPacket(packet) if finished: del self.jobs[handle] del job.client_connection.related_jobs[handle] del job.worker_connection.related_jobs[handle] def handleSetClientID(self, packet): name = packet.getArgument(0) packet.connection.client_id = name def handleCanDo(self, packet): name = packet.getArgument(0) self.log.debug("Adding function %s to %s" % (name, packet.connection)) packet.connection.functions.add(name) self.functions.add(name) def handleCantDo(self, packet): name = packet.getArgument(0) self.log.debug("Removing function %s from %s" % (name, packet.connection)) packet.connection.functions.remove(name) def handleResetAbilities(self, packet): self.log.debug("Resetting functions for %s" % packet.connection) packet.connection.functions = set() def handleGetStatus(self, packet): handle = packet.getArgument(0) self.log.debug("Getting status for %s" % handle) known = 0 running = 0 numerator = b'' denominator = b'' job = self.jobs.get(handle) if job: known = 1 if job.running: running = 1 numerator = job.numerator or b'' denominator = job.denominator or b'' data = (handle + b'\x00' + str(known).encode('utf8') + b'\x00' + str(running).encode('utf8') + b'\x00' + numerator + b'\x00' + denominator) p = Packet(constants.RES, constants.STATUS_RES, data) packet.connection.sendPacket(p) gear-0.4.0/test-requirements.txt0000664000175300017540000000023612207146231020026 0ustar jenkinsjenkins00000000000000coverage>=3.6 discover fixtures>=0.3.12 hacking>=0.5.3,<0.6 python-subunit sphinx>=1.1.2 testrepository>=0.0.13 testresources testscenarios testtools>=0.9.27 gear-0.4.0/tox.ini0000664000175300017540000000113412207146231015076 0ustar jenkinsjenkins00000000000000[tox] envlist = py26,py27,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = python setup.py testr --slowest --testr-args='{posargs}' [tox:jenkins] sitepackages = True downloadcache = ~/cache/pip [testenv:pep8] commands = flake8 [testenv:cover] setenv = VIRTUAL_ENV={envdir} commands = python setup.py testr --coverage [testenv:venv] commands = {posargs} [flake8] exclude = .venv,.tox,dist,doc,*.egg show-source = true ignore = E123,E125gear-0.4.0/.testr.conf0000664000175300017540000000026412207146231015654 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_TEST_TIMEOUT=60 ${PYTHON:-python} -m subunit.run discover -t ./ gear/tests/ $LISTOPT $IDOPTION test_id_option=--load-list 
$IDFILE test_list_option=--list gear-0.4.0/PKG-INFO0000664000175300017540000000136012207146270014664 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: gear Version: 0.4.0 Summary: Pure Python Async Gear Protocol Library Home-page: http://pypi.python.org/pypi/gear Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: python-gear =========== A pure-Python asynchronous library to interface with Gearman. Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python gear-0.4.0/setup.cfg0000664000175300017540000000142212207146270015407 0ustar jenkinsjenkins00000000000000[metadata] name = gear author = OpenStack author-email = openstack-dev@lists.openstack.org summary = Pure Python Async Gear Protocol Library description-file = README.rst home-page = http://pypi.python.org/pypi/gear classifier = Development Status :: 4 - Beta Environment :: Console Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: OS Independent Programming Language :: Python [files] packages = gear [global] setup-hooks = pbr.hooks.setup_hook [entry_points] console_scripts = geard = gear.cmd.geard:main [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [egg_info] tag_build = tag_date = 0 tag_svn_revision = 0 gear-0.4.0/requirements.txt0000664000175300017540000000004612207146231017050 0ustar jenkinsjenkins00000000000000pbr>=0.5.21,<1.0 python-daemon extras gear-0.4.0/README.rst0000664000175300017540000000012712207146231015253 0ustar jenkinsjenkins00000000000000python-gear =========== A pure-Python asynchronous library to interface with Gearman. gear-0.4.0/AUTHORS0000664000175300017540000000027312207146270014641 0ustar jenkinsjenkins00000000000000Clark Boylan David Shrewsbury James E. Blair Khai Do Monty Taylor gear-0.4.0/CONTRIBUTING.rst0000664000175300017540000000104112207146231016221 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in the "If you're a developer, start here" section of this page: http://wiki.openstack.org/HowToContribute Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://wiki.openstack.org/GerritWorkflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/python-gear gear-0.4.0/MANIFEST.in0000664000175300017540000000025012207146231015317 0ustar jenkinsjenkins00000000000000include AUTHORS include ChangeLog include README.rst include requirements.txt include test-requirements.txt exclude .gitignore exclude .gitreview global-exclude *.pyc gear-0.4.0/gear.egg-info/0000775000175300017540000000000012207146270016177 5ustar jenkinsjenkins00000000000000gear-0.4.0/gear.egg-info/SOURCES.txt0000664000175300017540000000076512207146270020073 0ustar jenkinsjenkins00000000000000.testr.conf AUTHORS CONTRIBUTING.rst ChangeLog LICENSE MANIFEST.in README.rst requirements.txt setup.cfg setup.py test-requirements.txt tox.ini doc/source/conf.py doc/source/index.rst gear/__init__.py gear/constants.py gear.egg-info/PKG-INFO gear.egg-info/SOURCES.txt gear.egg-info/dependency_links.txt gear.egg-info/entry_points.txt gear.egg-info/not-zip-safe gear.egg-info/requires.txt gear.egg-info/top_level.txt gear/cmd/__init__.py gear/cmd/geard.py gear/tests/__init__.py gear/tests/test_gear.pygear-0.4.0/gear.egg-info/PKG-INFO0000664000175300017540000000136012207146270017274 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: gear Version: 0.4.0 Summary: Pure Python Async Gear Protocol Library Home-page: http://pypi.python.org/pypi/gear Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: python-gear =========== A pure-Python asynchronous library to interface with Gearman. Platform: UNKNOWN Classifier: Development Status :: 4 - Beta Classifier: Environment :: Console Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: OS Independent Classifier: Programming Language :: Python gear-0.4.0/gear.egg-info/not-zip-safe0000664000175300017540000000000112207146232020423 0ustar jenkinsjenkins00000000000000 gear-0.4.0/gear.egg-info/dependency_links.txt0000664000175300017540000000000112207146270022245 0ustar jenkinsjenkins00000000000000 gear-0.4.0/gear.egg-info/top_level.txt0000664000175300017540000000000512207146270020724 0ustar jenkinsjenkins00000000000000gear gear-0.4.0/gear.egg-info/requires.txt0000664000175300017540000000004512207146270020576 0ustar jenkinsjenkins00000000000000pbr>=0.5.21,<1.0 python-daemon extrasgear-0.4.0/gear.egg-info/entry_points.txt0000664000175300017540000000005712207146270021477 0ustar jenkinsjenkins00000000000000[console_scripts] geard = gear.cmd.geard:main gear-0.4.0/doc/0000775000175300017540000000000012207146270014334 5ustar jenkinsjenkins00000000000000gear-0.4.0/doc/source/0000775000175300017540000000000012207146270015634 5ustar jenkinsjenkins00000000000000gear-0.4.0/doc/source/conf.py0000664000175300017540000001717412207146231017142 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # # Gear documentation build configuration file, created by # sphinx-quickstart on Mon Apr 8 15:28:36 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Gear' copyright = u'2013, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Geardoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Gear.tex', u'Gear Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'gear', u'Gear Documentation', [u'OpenStack Foundation'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Gear', u'Gear Documentation', u'OpenStack Foundation', 'Gear', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. 
#texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' gear-0.4.0/doc/source/index.rst0000664000175300017540000001334512207146231017500 0ustar jenkinsjenkins00000000000000Gear: Asynchronous Event-Driven Gearman Interface ================================================= .. module:: gear :synopsis: Asynchronous Event-Driven Gearman Interface This module implements an asynchronous event-driven interface to Gearman. It provides interfaces to build a client or worker, and access to the administrative protocol. The design approach is to keep it simple, with a relatively thin abstraction of the Gearman protocol itself. It should be easy to use to build a client or worker that operates either synchronously or asynchronously. The module also provides a simple Gearman server for use as a convenience in unit tests. The server is not designed for production use under load. Client Example -------------- To use the client interface, instantiate a :py:class:`Client`, and submit a :py:class:`Job`. For example:: import gear client = gear.Client() client.addServer('gearman.example.com') client.waitForServer() # Wait for at least one server to be connected job = gear.Job("reverse", b"test string") client.submitJob(job) The waitForServer() call is only necessary when running in a synchronous context. When running asynchronously, it is probably more desirable to omit that call and instead handle the :py:class:`NoConnectedServersError` exception that submitJob may raise if no servers are connected at the time. When Gearman returns data to the client, the :py:class:`Job` object is updated immediately. Event handlers are called on the :py:class:`Client` object so that subclasses have ample facilities for reacting to events synchronously. Worker Example -------------- To use the worker interface, create a :py:class:`Worker`, register at least one function that the worker supports, and then wait for a Job to be dispatched to the worker. An example of a Gearman worker:: import gear worker = gear.Worker('reverser') worker.addServer('gearman.example.com') worker.registerFunction("reverse") while True: job = worker.getJob() job.sendWorkComplete(job.arguments[::-1]) SSL Connections --------------- For versions of Gearman supporting SSL connections, specify the files containing the SSL private key, public certificate, and CA certificate in the addServer() call. For example:: ssl_key = '/path/to/key.pem' ssl_cert = '/path/to/cert.pem' ssl_ca = '/path/to/ca.pem' client.addServer('gearman.example.com', 4730, ssl_key, ssl_cert, ssl_ca) All three files must be specified for SSL to be used. API Reference ============= The following sections document the module's public API. It is divided into sections focusing on implementing a client, a worker, using the administrative protocol, and then the classes that are common to all usages of the module. Client Usage ------------ The classes in this section should be all that are needed in order to implement a Gearman client. Client Objects ^^^^^^^^^^^^^^ .. autoclass:: gear.Client :members: :inherited-members: Job Objects ^^^^^^^^^^^ .. autoclass:: gear.Job :members: :inherited-members: Worker Usage ------------ The classes in this section should be all that are needed in order to implement a Gearman worker. Worker Objects ^^^^^^^^^^^^^^ .. autoclass:: gear.Worker :members: :inherited-members: FunctionRecord Objects ^^^^^^^^^^^^^^^^^^^^^^ .. 
autoclass:: gear.FunctionRecord :members: :inherited-members: WorkerJob Objects ^^^^^^^^^^^^^^^^^ .. autoclass:: gear.WorkerJob :members: :inherited-members: Administrative Protocol ----------------------- Gearman provides an administrative protocol that is multiplexed on the same connection as the normal binary protocol for jobs. The classes in this section are useful for working with that protocol. They need to be used with an existing :py:class:`Connection` object; either one obtained via a :py:class:`Client` or :py:class:`Worker`, or via direct instantiation of :py:class:`Connection` to a Gearman server. AdminRequest Objects ^^^^^^^^^^^^^^^^^^^^ .. autoclass:: gear.AdminRequest :members: :inherited-members: .. autoclass:: gear.StatusAdminRequest :inherited-members: .. autoclass:: gear.ShowJobsAdminRequest :inherited-members: .. autoclass:: gear.ShowUniqueJobsAdminRequest :inherited-members: .. autoclass:: gear.CancelJobAdminRequest :inherited-members: .. autoclass:: gear.VersionAdminRequest :inherited-members: Server Usage ------------ A simple Gearman server is provided for convenience in unit testing, but is not designed for production use at scale. Its only required parameter is the port number on which to listen; paths to an SSL key, certificate, and CA certificate may optionally be supplied to enable SSL. Server Objects ^^^^^^^^^^^^^^ .. autoclass:: gear.Server :members: :inherited-members: Common ------ These classes do not normally need to be directly instantiated to use the gear API, but they may be returned or otherwise be accessible from other classes in this module. They generally operate at a lower level, but still form part of the public API. Connection Objects ^^^^^^^^^^^^^^^^^^ .. autoclass:: gear.Connection :members: :inherited-members: Packet Objects ^^^^^^^^^^^^^^ .. autoclass:: gear.Packet :members: :inherited-members: Exceptions ^^^^^^^^^^ .. autoexception:: gear.ConnectionError .. autoexception:: gear.InvalidDataError .. autoexception:: gear.ConfigurationError .. autoexception:: gear.NoConnectedServersError .. autoexception:: gear.UnknownJobError .. autoexception:: gear.InterruptedError Constants --------- These constants are used by public API classes. .. py:data:: PRECEDENCE_NORMAL Normal job precedence. .. py:data:: PRECEDENCE_LOW Low job precedence. .. py:data:: PRECEDENCE_HIGH High job precedence. .. automodule:: gear.constants :members: Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` gear-0.4.0/LICENSE0000664000175300017540000002613612207146231014601 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. gear-0.4.0/ChangeLog0000664000175300017540000003747012207146270015354 0ustar jenkinsjenkins00000000000000commit a646bf6dc65758a1b25028e561ee475e583ad317 Merge: 59d4be6 a49102f Author: Jenkins Date: Tue Aug 27 01:38:02 2013 +0000 Merge "Fix ordering of state change in server" commit a49102f44ce7cb2a439bc593185b888a9ef0557e Author: James E. Blair Date: Mon Aug 26 18:00:36 2013 -0700 Fix ordering of state change in server It was possible for an entire noop-grab_job-job_assign cycle to happen between before the wakeConnections method set the connection state to AWAKE. Set it first before sending the NOOP packet. Change-Id: I0d2170b6c583d95a44e289eca19611d11f93c6fd commit 59d4be6ca9220807290f4e5023e830dd0c9582b6 Author: Khai Do Date: Thu Aug 15 11:46:17 2013 -0700 fix project dependencies update pbr requirements to modern state. also added geard requirements to requirements.txt Change-Id: I302b4fa49574b48bb34eb3e7d7df33d2d2c35ce5 commit 12528bfcceb5c79b08a7df517c4ccbc0c90240d8 Author: James E. 
Blair Date: Tue Aug 6 10:48:31 2013 -0700 Server: make job handle safer Use a global counter in the job handle. Previously the job handle was using the hostname + a monotonically increasing counter for the connection. If there were two connections from the same host, duplicate handles would be generated. Instead, use a global counter for the job handle to guarantee that no collisions will happen. The hostname portion of the handle is no longer needed, but is retained for format compatibility with C gearman (in case some tool assumes something about the format). Change-Id: I44d1784b97d78fc1879a5612316165fa49b90b83 commit 1ba52133fcc328650ecce8b5ee0941a888ae461a Merge: 86606d3 6401d0b Author: Jenkins Date: Fri Jul 12 01:10:56 2013 +0000 Merge "Remove E128 pep8 ignore" commit 6401d0b2377c1310aa3e53ec6365b8f299465912 Author: James E. Blair Date: Tue Jul 9 09:20:04 2013 -0700 Remove E128 pep8 ignore Change-Id: I826f90c363787ee93f0ac3706a57d621dd0c0ce0 commit 86606d393493373420433f82e8fcb66e5ae0d067 Merge: 9f3057e 035b883 Author: Jenkins Date: Mon Jul 8 23:02:56 2013 +0000 Merge "Add SSL support for clients, workers, and geard" commit 035b883e7a71e3af044c61ea3781e578c6da34d0 Author: David Shrewsbury Date: Tue Jul 2 22:20:20 2013 -0400 Add SSL support for clients, workers, and geard This change requires all three SSL files (private key, public certificate, and CA certificate) to be specified in order for SSL to be used. Change-Id: Ie69b9c16325c04a1818c0b54d3b23526d11ed6a1 commit 9f3057ec280ab1ba1ca5622e609d204ad93f3c1e Author: Clark Boylan Date: Tue Jul 2 19:20:32 2013 -0700 Set hacking versions. * test-requirements.txt: Specify valid versions of hacking and remove flake8 mention. This prevents confusion of versions for common dependencies of flake8, hacking, and pyflakes. Change-Id: I89b32c53f7a4fdba717e6a4c2cbf531ac718af6c commit 3ca44afc2cb040bdc4cd2f090ae0b9f364b8e6c4 Author: James E. Blair Date: Thu Jun 20 14:06:47 2013 -0700 Add server command: "geard". Change-Id: I38f5ac0c960f476838c798c775aef4f91dde43f2 commit b0953032ef941a8552aa74ad1df95d7b6dea4677 Author: James E. Blair Date: Wed Jun 12 17:00:36 2013 -0700 Fix accounting in server 'status' command. The calculations for the number of jobs in the queue and running as reported by 'status' were incorrect. Change-Id: I62bb9583c649b665bacc740a37e8f496f2720fc8 commit 218fe26a2c278bdc352d8714ffbfd655052039f6 Merge: 570534c 0c5390b Author: Jenkins Date: Mon Jun 10 22:41:42 2013 +0000 Merge "Make BaseClientServer threads daemons." commit 0c5390b298b846d89da155e289390af260fa5a71 Author: Clark Boylan Date: Mon Jun 10 11:17:50 2013 -0700 Make BaseClientServer threads daemons. Gear should run its threads as daemon threads to prevent it from taking control of the processes it is running in if that processes other threads die. Change-Id: I43ae02e5bbf63c7aa53eeefe4e88dcc904e80e4a commit 570534cff9bb1267b131bd7e2edcfd1eff13b230 Author: James E. Blair Date: Fri Jun 7 12:52:00 2013 -0700 Clean up server jobs on disconnect. When a client or worker disconnects from the server, clean up any records of associated jobs. Change-Id: I1fb234a32df6f9d23f4de0a2fb4c1c82cf259329 commit f88b0ddb971f177a1ff7e92e3e366cbf079527bc Author: James E. Blair Date: Thu Jun 6 09:33:04 2013 -0700 Add tiered queues to server. (Low/normal/high precedence for jobs). Change-Id: Ia10866bcedb7dc828a44e342c202d90d60de87a5 commit e180fe65d74ec09388f4096dbe733e2fc7cbdf37 Author: James E. Blair Date: Thu Jun 6 08:54:29 2013 -0700 Add workers admin command to server. 
Change-Id: If41816e7d823593a469f16255662dc12c6b7fd4b commit 1e76bfa886dd340069a5bf80535e5eb8f37a7c88 Author: Clark Boylan Date: Mon Jun 3 20:09:34 2013 -0700 Support python3. Import queue on python3 and Queue on python2. Use bytestrings for all string constants. Slice byte arrays when comparing to bytestrings. Don't use basestring. Gear now expects its packet data to be byte strings. Non data fields may be passed as unicode strings, bytes or bytearrays. Unicode strings will be converted to bytes using the utf8 encoding. Change-Id: Iccdd01f9ce6f649f8bbaa93370d330c78c89f087 commit 8336e63685b06f6165f43cc915d46c0c4bb13b32 Author: James E. Blair Date: Wed Jun 5 08:43:26 2013 -0700 Explicitly check event state after waiting. In python2.6, the wait function always returns None, so explicitly check the status of events after waiting with a timeout. Change-Id: Ie354c1841c49725c93aa8ddf1c04f0e9a9b56548 commit 4bc883e9f3d87df72d8380d8ac0e223c903a6e45 Author: James E. Blair Date: Tue Jun 4 19:29:32 2013 -0700 Handle blobs with nulls. Some areas where we handle arbitrary blobs (function arguments and echo data) were incorrectly stopping at the first null byte. Instead, when we parse out the arguments to a packet, handle the last one as a special case if it can contain null bytes (return up to the end of the packet). Change-Id: I45178347cdb058e34329c6601f5e841da0d3b40b commit 0e87ebfbbc46605e4da20b677966289221310c8f Author: James E. Blair Date: Thu May 30 12:26:34 2013 -0700 Wait for responses to admin requests. That's probably what callers will want to do anyway. Change-Id: I9086e2e182f29ee778b105c82bcef81c12aeb54a commit 2bb0fcaa28314a54211b1c640b93a325425ac5a2 Author: James E. Blair Date: Thu May 30 12:11:10 2013 -0700 Wait for responses to some requests. SUBMIT_JOB (and related) requests as well as OPTION_REQ requests always return a response (success or error), so have the client wait for a response and handle errors appropriately. Since they can both return ERROR packets, and the gearman protocol has no mechanism to associate ERROR responses with the requests that triggered them, put each task that may trigger an error response in a fifo queue and assume that what we get back is in the same order. The only other commands (properly executed) that can trigger an ERROR response are the WORK_ requests, which are only sent by workers, not clients. If anything about the packet sequencing seems out of order (eg, we get a successful response that doesn't match the next item on the fifo queue) restart the connection (there is likely no way to recover). Change-Id: Ibca972f5d57313cebba028ed473273f46fa68e2a commit f7c06c0dd176501b00a018eb457154422597e3a4 Author: James E. Blair Date: Thu May 23 15:41:18 2013 -0700 Handle GET_STATUS in the server. Change-Id: Iff6cec77de9eed8e462ec3334fef8c487aa96f2c commit c642b2069d6d3e3c53de6b77bf204323c32c3364 Author: James E. Blair Date: Thu May 23 08:26:09 2013 -0700 Fix connection debugging messages. The debugging messages were printing the wrong connection handle. Change-Id: If4548296015942879c572d2a6b4964b553a9c535 commit 463b84b3d45f3967882fffe993982fef8092fcbf Author: James E. Blair Date: Wed May 22 16:45:49 2013 -0700 Fix miscategorized packets. Some packets were being sent with the REQ code instead of RES. Change-Id: I05fc56ca1dc7ebd2367567e91b4098d9e97b8fb4 commit 1c4df7622f8e291ebf24a7fa765618120b0ef0a7 Author: James E. Blair Date: Tue May 21 10:46:51 2013 -0700 More improvements to admin requests. Finish replacing isComplete. 
Also, add back in the bit where we save the response because it turns out that's important. Change-Id: I8441386a9aae9c62c1527e780d0b41271f465ac5 commit e3b42f6795b9fc5907e333df39f2a3370efbabf6 Author: James E. Blair Date: Mon May 20 15:18:09 2013 -0700 Improve efficiency of admin packets. Using a regex to determine when an admin packet is complete is very slow with real-world data for most admin responses. Instead, perform simple string comparison checks. Change-Id: I5ec32c8ab2d44f20061343ee1f95a3d2066e8642 commit cebd9d7ad9ef313b94d3e564d418de93cfde0c30 Author: James E. Blair Date: Thu May 9 14:31:02 2013 -0700 Add remaining client/worker handlers. Adds all remaining handle methods needed to handle responses that clients and workers can expect. Adds an echo method to connection objects (for ping tests). Adds all packet types to the handle method in BaseClientServer, and add default implementations for each of them that logs an error. Subclasses (eg Client, Worker) will override them as appropriate. Add a "broadcast lock" so that Clients and Workers can maintain exclusivity around operations that apply to all connections. Change-Id: Ia31f0941687ccfee711c3818e8e0c21ccbd1b313 commit ea796eb3403be7e616939ec1de7ae8da811314f0 Author: James E. Blair Date: Thu May 9 09:44:02 2013 -0700 Encoding bugfix. Use bytearrays in Packet.toBinary(). Encode strings as utf8. Return the job in the default handleDisconnect method (as the documentation says should be done). Change-Id: I2d2410abff94bc9a7a20f3729c193b5ae8a610d1 commit 9aa4f48bb66fba044013ea498b55af2269d110d5 Author: James E. Blair Date: Wed May 8 12:24:36 2013 -0700 Add status command to test server. Also record connection times to aid subclasses/users in detecting when a client has reconnected. Change-Id: I1abbf85fd1e20e6c509bcb54a81aa8bf45c135ef commit 5b7852418d51eb67617f3affcccd8749e5911145 Author: James E. Blair Date: Wed May 8 10:58:15 2013 -0700 Some bugfixes and test/debugging assist. * Add some more debug log messages * Fix copy/paste errors in function signatures * Handle port 0 for the test server (system assigns a port at random -- record that port for the caller to inspect) * In the server, change connection state to awake after sending a noop packet (to avoid sending multiple noops) * Split handleGrabJobUnique in the server into several functions for modularity (helpful for subclasses) * In the server, immediately wake up a connection on receipt of a pre_sleep packet if there is a waiting job (it may have arrived _after_ the most recent grab_job). Change-Id: I0f2057bddfd6a13a0d2db08641ef51502ec06d9e commit 61bd6ce5708d27015bb9ccf01aff99bb2b0ac2d8 Author: James E. Blair Date: Thu May 2 17:38:47 2013 -0700 Add simple Gearman server. For testing, not for real use under load. Change-Id: I9c84b1eea7d868e907b80b6edf60c49c172c356b commit 772d256328b4c876a0f308240ef941800def55fc Author: James E. Blair Date: Wed May 1 11:57:39 2013 -0700 Reorganize documentation. Split up into sections by usage. Improve autodoc output of AdminRequest objects and enforce appropriate arguments in constructors. Change-Id: I834e4d10edbee9494d68ae010f439624172a111f commit 868cc239c93fe93268d53776636d02755c99ce4c Author: James E. Blair Date: Tue Apr 30 16:23:34 2013 -0700 Add worker functionality. Change-Id: I7d624df38278dbfba8a3123285b33a52bdc3b870 commit 38cd3373890bf4cddf72d84f453052ddcad0f207 Author: James E. Blair Date: Mon Apr 29 11:54:32 2013 -0700 Add a method that waits for a job handle. 
Job handles should be returned immediately, and are quite handy to have in further interactions with gearman, so provide a convenience method to wait for a job handle after submission. Change-Id: I3b2fb3c75bf4c0618647f49386cb198e08008fab commit a470e5e4e0742006c8b4e990be1fb7b29b54eb88 Author: James E. Blair Date: Thu Apr 25 16:34:11 2013 -0700 Support administrative protocol. Add support for multiplexing the text-based admin protocol on existing connections. Add some basic support for some admin commands (in an extensible way). Change-Id: Ib6c9891e22518d610b5abed75c78efcc4f23ed60 commit 15ab33ed3bfd6b88b79bcac41dc0b3633c725c20 Author: James E. Blair Date: Thu Apr 25 11:29:32 2013 -0700 Treat job data as an array. So that users have the option of seeing the discrete data chunks returned by gearman. If they should be concatenated, that's easy enough for the user to do. Change-Id: I19ebed92c41271546ff908582d671cfbaa34977c commit b52c073bb4bafdd12ecb055c88dc761e18204879 Author: James E. Blair Date: Thu Apr 11 14:47:40 2013 -0700 Fix typo in workStatus. We were checking the numerator twice. Get the denominator. Change-Id: Ib8e8868a3d067dcebb91ae8c9181a22b975abe99 commit ba9ff1c9c6c91258ec7960a2eb92d80a7d5f0369 Author: James E. Blair Date: Mon Apr 8 15:17:42 2013 -0700 Add Sphinx documentation. Add more events. Also, return Job objects from event handlers for convenience of subclasses. Add sphinx config to setup.cfg. Change-Id: I2ccc32fbc5f043a67d5da86cc3c37ab3d2c30d99 commit 8a8b7ffab0f53a4e4c0890912fe034a9583f40c9 Author: Monty Taylor Date: Fri Apr 5 19:40:07 2013 -0400 Make flake8 and OpenStack Hacking clean. Change-Id: Ifbc78187e29e7befefcfff47b803ff20c86f6e75 commit 207e5cf884d7c7a0f0e95438cb8860b5092c0e52 Author: Monty Taylor Date: Fri Apr 5 19:31:07 2013 -0400 Add initial test suite. Change-Id: I4fdbc8d6c3073b7edfdc8bf486575820088faa72 commit 855b217f6dac1fd6a613ba8bfda120f8c96734b8 Author: Monty Taylor Date: Fri Apr 5 19:10:41 2013 -0400 Add pbr-based setup. Change-Id: I8ff06c6711e1d9f5bf4d1278f37d4e2f7f799e3d commit b6097dc7ed885ff7241ca90f95246c66dbe7cf10 Author: James E. Blair Date: Fri Apr 5 12:08:58 2013 -0700 Initial commit. Change-Id: Ia612b44511b19e22a73cb6d4662fc02207af9321 commit 604408042330eb023cb8e442eb9302b4d365d9bb Author: Openstack Project Creator Date: Fri Apr 5 18:53:07 2013 +0000 Added .gitreview