rq-1.2.2/0000755000076500000240000000000013615036417012570 5ustar selwinstaff00000000000000rq-1.2.2/PKG-INFO0000644000076500000240000000313613615036417013670 0ustar selwinstaff00000000000000Metadata-Version: 1.2 Name: rq Version: 1.2.2 Summary: RQ is a simple, lightweight, library for creating background jobs, and processing them. Home-page: https://github.com/nvie/rq/ Author: Vincent Driessen Author-email: vincent@3rdcloud.com License: BSD Description: rq is a simple, lightweight, library for creating background jobs, and processing them. Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: End Users/Desktop Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: Science/Research Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: POSIX Classifier: Operating System :: MacOS Classifier: Operating System :: Unix Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Internet Classifier: Topic :: Scientific/Engineering Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: System :: Systems Administration Classifier: Topic :: System :: Monitoring Requires-Python: >=2.7 rq-1.2.2/LICENSE0000644000076500000240000000273613246365164013611 0ustar selwinstaff00000000000000Copyright 2012 Vincent Driessen. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY VINCENT DRIESSEN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VINCENT DRIESSEN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Vincent Driessen. 
rq-1.2.2/rq.egg-info/0000755000076500000240000000000013615036417014704 5ustar selwinstaff00000000000000rq-1.2.2/rq.egg-info/PKG-INFO0000644000076500000240000000313613615036417016004 0ustar selwinstaff00000000000000Metadata-Version: 1.2 Name: rq Version: 1.2.2 Summary: RQ is a simple, lightweight, library for creating background jobs, and processing them. Home-page: https://github.com/nvie/rq/ Author: Vincent Driessen Author-email: vincent@3rdcloud.com License: BSD Description: rq is a simple, lightweight, library for creating background jobs, and processing them. Platform: any Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers Classifier: Intended Audience :: End Users/Desktop Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: Science/Research Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: BSD License Classifier: Operating System :: POSIX Classifier: Operating System :: MacOS Classifier: Operating System :: Unix Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: Software Development :: Libraries :: Python Modules Classifier: Topic :: Internet Classifier: Topic :: Scientific/Engineering Classifier: Topic :: System :: Distributed Computing Classifier: Topic :: System :: Systems Administration Classifier: Topic :: System :: Monitoring Requires-Python: >=2.7 rq-1.2.2/rq.egg-info/not-zip-safe0000644000076500000240000000000113176307777017146 0ustar selwinstaff00000000000000 rq-1.2.2/rq.egg-info/SOURCES.txt0000644000076500000240000000126113615036417016570 0ustar selwinstaff00000000000000LICENSE MANIFEST.in README.md setup.cfg setup.py rq/__init__.py rq/connections.py rq/decorators.py rq/defaults.py rq/dummy.py rq/exceptions.py rq/job.py rq/local.py rq/logutils.py rq/queue.py rq/registry.py rq/scheduler.py rq/suspension.py rq/timeouts.py rq/utils.py rq/version.py rq/worker.py rq/worker_registration.py rq.egg-info/PKG-INFO rq.egg-info/SOURCES.txt rq.egg-info/dependency_links.txt rq.egg-info/entry_points.txt rq.egg-info/not-zip-safe rq.egg-info/requires.txt rq.egg-info/top_level.txt rq/cli/__init__.py rq/cli/cli.py rq/cli/helpers.py rq/compat/__init__.py rq/compat/connections.py rq/compat/dictconfig.py rq/contrib/__init__.py rq/contrib/legacy.py rq/contrib/sentry.pyrq-1.2.2/rq.egg-info/entry_points.txt0000644000076500000240000000012213615036417020175 0ustar selwinstaff00000000000000[console_scripts] rq = rq.cli:main rqinfo = rq.cli:info rqworker = rq.cli:worker rq-1.2.2/rq.egg-info/requires.txt0000644000076500000240000000003013615036417017275 0ustar selwinstaff00000000000000redis>=3.0.0 click>=5.0 rq-1.2.2/rq.egg-info/top_level.txt0000644000076500000240000000001113615036417017426 0ustar selwinstaff00000000000000rq tests rq-1.2.2/rq.egg-info/dependency_links.txt0000644000076500000240000000000113615036417020752 0ustar selwinstaff00000000000000 rq-1.2.2/MANIFEST.in0000644000076500000240000000005213566232513014323 0ustar selwinstaff00000000000000include LICENSE recursive-exclude tests * rq-1.2.2/README.md0000644000076500000240000000516613566232513014057 0ustar 
selwinstaff00000000000000RQ (_Redis Queue_) is a simple Python library for queueing jobs and processing them in the background with workers. It is backed by Redis and it is designed to have a low barrier to entry. It should be integrated in your web stack easily. RQ requires Redis >= 3.0.0. [![Build status](https://travis-ci.org/rq/rq.svg?branch=master)](https://secure.travis-ci.org/rq/rq) [![PyPI](https://img.shields.io/pypi/pyversions/rq.svg)](https://pypi.python.org/pypi/rq) [![Coverage](https://codecov.io/gh/rq/rq/branch/master/graph/badge.svg)](https://codecov.io/gh/rq/rq) Full documentation can be found [here][d]. ## Support RQ If you find RQ useful, please consider supporting this project via [Tidelift](https://tidelift.com/subscription/pkg/pypi-rq?utm_source=pypi-rq&utm_medium=referral&utm_campaign=readme). ## Getting started First, run a Redis server, of course: ```console $ redis-server ``` To put jobs on queues, you don't have to do anything special, just define your typically lengthy or blocking function: ```python import requests def count_words_at_url(url): """Just an example function that's called async.""" resp = requests.get(url) return len(resp.text.split()) ``` You do use the excellent [requests][r] package, don't you? Then, create an RQ queue: ```python from redis import Redis from rq import Queue q = Queue(connection=Redis()) ``` And enqueue the function call: ```python from my_module import count_words_at_url job = q.enqueue(count_words_at_url, 'http://nvie.com') ``` For a more complete example, refer to the [docs][d]. But this is the essence. ### The worker To start executing enqueued function calls in the background, start a worker from your project's directory: ```console $ rq worker *** Listening for work on default Got count_words_at_url('http://nvie.com') from default Job result = 818 *** Listening for work on default ``` That's about it. ## Installation Simply use the following command to install the latest released version: pip install rq If you want the cutting edge version (that may well be broken), use this: pip install -e git+https://github.com/nvie/rq.git@master#egg=rq ## Project history This project has been inspired by the good parts of [Celery][1], [Resque][2] and [this snippet][3], and has been created as a lightweight alternative to the heaviness of Celery or other AMQP-based queueing implementations. [r]: http://python-requests.org [d]: http://python-rq.org/ [m]: http://pypi.python.org/pypi/mailer [p]: http://docs.python.org/library/pickle.html [1]: http://www.celeryproject.org/ [2]: https://github.com/resque/resque [3]: http://flask.pocoo.org/snippets/73/ rq-1.2.2/setup.py0000644000076500000240000000550013611564137014303 0ustar selwinstaff00000000000000""" rq is a simple, lightweight, library for creating background jobs, and processing them. 
""" import os from setuptools import setup, find_packages def get_version(): basedir = os.path.dirname(__file__) try: with open(os.path.join(basedir, 'rq/version.py')) as f: locals = {} exec(f.read(), locals) return locals['VERSION'] except FileNotFoundError: raise RuntimeError('No version info found.') setup( name='rq', version=get_version(), url='https://github.com/nvie/rq/', license='BSD', author='Vincent Driessen', author_email='vincent@3rdcloud.com', description='RQ is a simple, lightweight, library for creating background ' 'jobs, and processing them.', long_description=__doc__, packages=find_packages(exclude=['tests']), include_package_data=True, zip_safe=False, platforms='any', install_requires=[ 'redis >= 3.0.0', 'click >= 5.0' ], python_requires='>=2.7', entry_points={ 'console_scripts': [ 'rq = rq.cli:main', # NOTE: rqworker/rqinfo are kept for backward-compatibility, # remove eventually (TODO) 'rqinfo = rq.cli:info', 'rqworker = rq.cli:worker', ], }, classifiers=[ # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers # 'Development Status :: 1 - Planning', # 'Development Status :: 2 - Pre-Alpha', # 'Development Status :: 3 - Alpha', # 'Development Status :: 4 - Beta', 'Development Status :: 5 - Production/Stable', # 'Development Status :: 6 - Mature', # 'Development Status :: 7 - Inactive', 'Intended Audience :: Developers', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Information Technology', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: BSD License', 'Operating System :: POSIX', 'Operating System :: MacOS', 'Operating System :: Unix', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Internet', 'Topic :: Scientific/Engineering', 'Topic :: System :: Distributed Computing', 'Topic :: System :: Systems Administration', 'Topic :: System :: Monitoring', ] ) rq-1.2.2/setup.cfg0000644000076500000240000000024013615036417014405 0ustar selwinstaff00000000000000[bdist_rpm] requires = redis >= 3.0.0 click >= 3.0 [wheel] universal = 1 [flake8] max-line-length = 120 ignore = E731 [egg_info] tag_build = tag_date = 0 rq-1.2.2/rq/0000755000076500000240000000000013615036417013212 5ustar selwinstaff00000000000000rq-1.2.2/rq/queue.py0000644000076500000240000005275413614762704014731 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import uuid import warnings from datetime import datetime from redis import WatchError from .compat import as_text, string_types, total_ordering, utc from .connections import resolve_connection from .defaults import DEFAULT_RESULT_TTL from .exceptions import DequeueTimeout, NoSuchJobError, UnpickleError from .job import Job, JobStatus from .utils import backend_class, import_attribute, parse_timeout, utcnow def compact(lst): return [item for item in lst if item is not None] @total_ordering class Queue(object): job_class = Job DEFAULT_TIMEOUT = 180 # Default timeout seconds. 
redis_queue_namespace_prefix = 'rq:queue:' redis_queues_keys = 'rq:queues' @classmethod def all(cls, connection=None, job_class=None): """Returns an iterable of all Queues. """ connection = resolve_connection(connection) def to_queue(queue_key): return cls.from_queue_key(as_text(queue_key), connection=connection, job_class=job_class) return [to_queue(rq_key) for rq_key in connection.smembers(cls.redis_queues_keys) if rq_key] @classmethod def from_queue_key(cls, queue_key, connection=None, job_class=None): """Returns a Queue instance, based on the naming conventions for naming the internal Redis keys. Can be used to reverse-lookup Queues by their Redis keys. """ prefix = cls.redis_queue_namespace_prefix if not queue_key.startswith(prefix): raise ValueError('Not a valid RQ queue key: {0}'.format(queue_key)) name = queue_key[len(prefix):] return cls(name, connection=connection, job_class=job_class) def __init__(self, name='default', default_timeout=None, connection=None, is_async=True, job_class=None, **kwargs): self.connection = resolve_connection(connection) prefix = self.redis_queue_namespace_prefix self.name = name self._key = '{0}{1}'.format(prefix, name) self._default_timeout = parse_timeout(default_timeout) or self.DEFAULT_TIMEOUT self._is_async = is_async if 'async' in kwargs: self._is_async = kwargs['async'] warnings.warn('The `async` keyword is deprecated. Use `is_async` instead', DeprecationWarning) # override class attribute job_class if one was passed if job_class is not None: if isinstance(job_class, string_types): job_class = import_attribute(job_class) self.job_class = job_class def __len__(self): return self.count def __nonzero__(self): return True def __bool__(self): return True def __iter__(self): yield self @property def key(self): """Returns the Redis key for this Queue.""" return self._key @property def registry_cleaning_key(self): """Redis key used to indicate this queue has been cleaned.""" return 'rq:clean_registries:%s' % self.name def acquire_cleaning_lock(self): """Returns a boolean indicating whether a lock to clean this queue is acquired. A lock expires in 899 seconds (15 minutes - 1 second) """ return self.connection.set(self.registry_cleaning_key, 1, nx=1, ex=899) def empty(self): """Removes all messages on the queue.""" script = """ local prefix = "{0}" local q = KEYS[1] local count = 0 while true do local job_id = redis.call("lpop", q) if job_id == false then break end -- Delete the relevant keys redis.call("del", prefix..job_id) redis.call("del", prefix..job_id..":dependents") count = count + 1 end return count """.format(self.job_class.redis_job_namespace_prefix).encode("utf-8") script = self.connection.register_script(script) return script(keys=[self.key]) def delete(self, delete_jobs=True): """Deletes the queue. 
If delete_jobs is true it removes all the associated messages on the queue first.""" if delete_jobs: self.empty() with self.connection.pipeline() as pipeline: pipeline.srem(self.redis_queues_keys, self._key) pipeline.delete(self._key) pipeline.execute() def is_empty(self): """Returns whether the current queue is empty.""" return self.count == 0 @property def is_async(self): """Returns whether the current queue is async.""" return bool(self._is_async) def fetch_job(self, job_id): try: job = self.job_class.fetch(job_id, connection=self.connection) except NoSuchJobError: self.remove(job_id) else: if job.origin == self.name: return job def get_job_ids(self, offset=0, length=-1): """Returns a slice of job IDs in the queue.""" start = offset if length >= 0: end = offset + (length - 1) else: end = length return [as_text(job_id) for job_id in self.connection.lrange(self.key, start, end)] def get_jobs(self, offset=0, length=-1): """Returns a slice of jobs in the queue.""" job_ids = self.get_job_ids(offset, length) return compact([self.fetch_job(job_id) for job_id in job_ids]) @property def job_ids(self): """Returns a list of all job IDS in the queue.""" return self.get_job_ids() @property def jobs(self): """Returns a list of all (valid) jobs in the queue.""" return self.get_jobs() @property def count(self): """Returns a count of all messages in the queue.""" return self.connection.llen(self.key) @property def failed_job_registry(self): """Returns this queue's FailedJobRegistry.""" from rq.registry import FailedJobRegistry return FailedJobRegistry(queue=self, job_class=self.job_class) @property def started_job_registry(self): """Returns this queue's FailedJobRegistry.""" from rq.registry import StartedJobRegistry return StartedJobRegistry(queue=self, job_class=self.job_class) @property def finished_job_registry(self): """Returns this queue's FailedJobRegistry.""" from rq.registry import FinishedJobRegistry return FinishedJobRegistry(queue=self) @property def deferred_job_registry(self): """Returns this queue's FailedJobRegistry.""" from rq.registry import DeferredJobRegistry return DeferredJobRegistry(queue=self, job_class=self.job_class) @property def scheduled_job_registry(self): """Returns this queue's FailedJobRegistry.""" from rq.registry import ScheduledJobRegistry return ScheduledJobRegistry(queue=self, job_class=self.job_class) def remove(self, job_or_id, pipeline=None): """Removes Job from queue, accepts either a Job instance or ID.""" job_id = job_or_id.id if isinstance(job_or_id, self.job_class) else job_or_id if pipeline is not None: pipeline.lrem(self.key, 1, job_id) return return self.connection.lrem(self.key, 1, job_id) def compact(self): """Removes all "dead" jobs from the queue by cycling through it, while guaranteeing FIFO semantics. """ COMPACT_QUEUE = '{0}_compact:{1}'.format( self.redis_queue_namespace_prefix, uuid.uuid4()) # noqa self.connection.rename(self.key, COMPACT_QUEUE) while True: job_id = as_text(self.connection.lpop(COMPACT_QUEUE)) if job_id is None: break if self.job_class.exists(job_id, self.connection): self.connection.rpush(self.key, job_id) def push_job_id(self, job_id, pipeline=None, at_front=False): """Pushes a job ID on the corresponding Redis queue. 
'at_front' allows you to push the job onto the front instead of the back of the queue""" connection = pipeline if pipeline is not None else self.connection if at_front: connection.lpush(self.key, job_id) else: connection.rpush(self.key, job_id) def create_job(self, func, args=None, kwargs=None, timeout=None, result_ttl=None, ttl=None, failure_ttl=None, description=None, depends_on=None, job_id=None, meta=None, status=JobStatus.QUEUED): """Creates a job based on parameters given.""" timeout = parse_timeout(timeout) or self._default_timeout job = self.job_class.create( func, args=args, kwargs=kwargs, connection=self.connection, result_ttl=result_ttl, ttl=ttl, failure_ttl=failure_ttl, status=status, description=description, depends_on=depends_on, timeout=timeout, id=job_id, origin=self.name, meta=meta ) return job def enqueue_call(self, func, args=None, kwargs=None, timeout=None, result_ttl=None, ttl=None, failure_ttl=None, description=None, depends_on=None, job_id=None, at_front=False, meta=None): """Creates a job to represent the delayed function call and enqueues it. It is much like `.enqueue()`, except that it takes the function's args and kwargs as explicit arguments. Any kwargs passed to this function contain options for RQ itself. """ timeout = parse_timeout(timeout) if timeout is None: timeout = self._default_timeout elif timeout == 0: raise ValueError('0 timeout is not allowed. Use -1 for infinite timeout') result_ttl = parse_timeout(result_ttl) failure_ttl = parse_timeout(failure_ttl) ttl = parse_timeout(ttl) if ttl is not None and ttl <= 0: raise ValueError('Job ttl must be greater than 0') job = self.job_class.create( func, args=args, kwargs=kwargs, connection=self.connection, result_ttl=result_ttl, ttl=ttl, failure_ttl=failure_ttl, description=description, depends_on=depends_on, origin=self.name, id=job_id, meta=meta, status=JobStatus.QUEUED, timeout=timeout, ) # If a _dependent_ job depends on any unfinished job, register all the # _dependent_ job's dependencies instead of enqueueing it. # # `Job#fetch_dependencies` sets WATCH on all dependencies. If # WatchError is raised in the when the pipeline is executed, that means # something else has modified either the set of dependencies or the # status of one of them. In this case, we simply retry. if depends_on is not None: with self.connection.pipeline() as pipe: while True: try: pipe.watch(job.dependencies_key) dependencies = job.fetch_dependencies( watch=True, pipeline=pipe ) pipe.multi() for dependency in dependencies: if dependency.get_status(refresh=False) != JobStatus.FINISHED: job.set_status(JobStatus.DEFERRED, pipeline=pipe) job.register_dependency(pipeline=pipe) job.save(pipeline=pipe) job.cleanup(ttl=job.ttl, pipeline=pipe) pipe.execute() return job break except WatchError: continue job = self.enqueue_job(job, at_front=at_front) return job def run_job(self, job): job.perform() job.set_status(JobStatus.FINISHED) job.save(include_meta=False) job.cleanup(DEFAULT_RESULT_TTL) return job def enqueue(self, f, *args, **kwargs): """Creates a job to represent the delayed function call and enqueues it. Expects the function to call, along with the arguments and keyword arguments. 
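        For example (an illustrative sketch; assumes a running Redis server and
        the `count_words_at_url` function from the README's example `my_module`):

            from redis import Redis
            from rq import Queue
            from my_module import count_words_at_url  # example module from the README

            q = Queue(connection=Redis())
            job = q.enqueue(count_words_at_url, 'http://nvie.com', job_timeout=60)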
The function argument `f` may be any of the following: * A reference to a function * A reference to an object's instance method * A string, representing the location of a function (must be meaningful to the import context of the workers) """ if not isinstance(f, string_types) and f.__module__ == '__main__': raise ValueError('Functions from the __main__ module cannot be processed ' 'by workers') # Detect explicit invocations, i.e. of the form: # q.enqueue(foo, args=(1, 2), kwargs={'a': 1}, job_timeout=30) timeout = kwargs.pop('job_timeout', None) description = kwargs.pop('description', None) result_ttl = kwargs.pop('result_ttl', None) ttl = kwargs.pop('ttl', None) failure_ttl = kwargs.pop('failure_ttl', None) depends_on = kwargs.pop('depends_on', None) job_id = kwargs.pop('job_id', None) at_front = kwargs.pop('at_front', False) meta = kwargs.pop('meta', None) if 'args' in kwargs or 'kwargs' in kwargs: assert args == (), 'Extra positional arguments cannot be used when using explicit args and kwargs' # noqa args = kwargs.pop('args', None) kwargs = kwargs.pop('kwargs', None) return self.enqueue_call( func=f, args=args, kwargs=kwargs, timeout=timeout, result_ttl=result_ttl, ttl=ttl, failure_ttl=failure_ttl, description=description, depends_on=depends_on, job_id=job_id, at_front=at_front, meta=meta ) def enqueue_at(self, datetime, func, *args, **kwargs): """Schedules a job to be enqueued at specified time""" from .registry import ScheduledJobRegistry job = self.create_job(func, status=JobStatus.SCHEDULED, *args, **kwargs) registry = ScheduledJobRegistry(queue=self) with self.connection.pipeline() as pipeline: job.save(pipeline=pipeline) registry.schedule(job, datetime, pipeline=pipeline) pipeline.execute() return job def enqueue_in(self, time_delta, func, *args, **kwargs): """Schedules a job to be executed in a given `timedelta` object""" return self.enqueue_at(datetime.now(utc) + time_delta, func, *args, **kwargs) def enqueue_job(self, job, pipeline=None, at_front=False): """Enqueues a job for delayed execution. If Queue is instantiated with is_async=False, job is executed immediately. """ pipe = pipeline if pipeline is not None else self.connection.pipeline() # Add Queue key set pipe.sadd(self.redis_queues_keys, self.key) job.set_status(JobStatus.QUEUED, pipeline=pipe) job.origin = self.name job.enqueued_at = utcnow() if job.timeout is None: job.timeout = self._default_timeout job.save(pipeline=pipe) job.cleanup(ttl=job.ttl, pipeline=pipe) if self._is_async: self.push_job_id(job.id, pipeline=pipe, at_front=at_front) if pipeline is None: pipe.execute() if not self._is_async: job = self.run_job(job) return job def enqueue_dependents(self, job, pipeline=None): """Enqueues all jobs in the given job's dependents set and clears it. When called without a pipeline, this method uses WATCH/MULTI/EXEC. If you pass a pipeline, only MULTI is called. The rest is up to the caller. 
""" from .registry import DeferredJobRegistry pipe = pipeline if pipeline is not None else self.connection.pipeline() dependents_key = job.dependents_key while True: try: # if a pipeline is passed, the caller is responsible for calling WATCH # to ensure all jobs are enqueued if pipeline is None: pipe.watch(dependents_key) dependent_jobs = [self.job_class.fetch(as_text(job_id), connection=self.connection) for job_id in pipe.smembers(dependents_key)] pipe.multi() for dependent in dependent_jobs: registry = DeferredJobRegistry(dependent.origin, self.connection, job_class=self.job_class) registry.remove(dependent, pipeline=pipe) if dependent.origin == self.name: self.enqueue_job(dependent, pipeline=pipe) else: queue = self.__class__(name=dependent.origin, connection=self.connection) queue.enqueue_job(dependent, pipeline=pipe) pipe.delete(dependents_key) if pipeline is None: pipe.execute() break except WatchError: if pipeline is None: continue else: # if the pipeline comes from the caller, we re-raise the # exception as it it the responsibility of the caller to # handle it raise def pop_job_id(self): """Pops a given job ID from this Redis queue.""" return as_text(self.connection.lpop(self.key)) @classmethod def lpop(cls, queue_keys, timeout, connection=None): """Helper method. Intermediate method to abstract away from some Redis API details, where LPOP accepts only a single key, whereas BLPOP accepts multiple. So if we want the non-blocking LPOP, we need to iterate over all queues, do individual LPOPs, and return the result. Until Redis receives a specific method for this, we'll have to wrap it this way. The timeout parameter is interpreted as follows: None - non-blocking (return immediately) > 0 - maximum number of seconds to block """ connection = resolve_connection(connection) if timeout is not None: # blocking variant if timeout == 0: raise ValueError('RQ does not support indefinite timeouts. Please pick a timeout value > 0') result = connection.blpop(queue_keys, timeout) if result is None: raise DequeueTimeout(timeout, queue_keys) queue_key, job_id = result return queue_key, job_id else: # non-blocking variant for queue_key in queue_keys: blob = connection.lpop(queue_key) if blob is not None: return queue_key, blob return None @classmethod def dequeue_any(cls, queues, timeout, connection=None, job_class=None): """Class method returning the job_class instance at the front of the given set of Queues, where the order of the queues is important. When all of the Queues are empty, depending on the `timeout` argument, either blocks execution of this function for the duration of the timeout or until new messages arrive on any of the queues, or returns None. See the documentation of cls.lpop for the interpretation of timeout. 
""" job_class = backend_class(cls, 'job_class', override=job_class) while True: queue_keys = [q.key for q in queues] result = cls.lpop(queue_keys, timeout, connection=connection) if result is None: return None queue_key, job_id = map(as_text, result) queue = cls.from_queue_key(queue_key, connection=connection, job_class=job_class) try: job = job_class.fetch(job_id, connection=connection) except NoSuchJobError: # Silently pass on jobs that don't exist (anymore), # and continue in the look continue except UnpickleError as e: # Attach queue information on the exception for improved error # reporting e.job_id = job_id e.queue = queue raise e return job, queue return None, None # Total ordering defition (the rest of the required Python methods are # auto-generated by the @total_ordering decorator) def __eq__(self, other): # noqa if not isinstance(other, Queue): raise TypeError('Cannot compare queues to other objects') return self.name == other.name def __lt__(self, other): if not isinstance(other, Queue): raise TypeError('Cannot compare queues to other objects') return self.name < other.name def __hash__(self): # pragma: no cover return hash(self.name) def __repr__(self): # noqa # pragma: no cover return '{0}({1!r})'.format(self.__class__.__name__, self.name) def __str__(self): return '<{0} {1}>'.format(self.__class__.__name__, self.name) rq-1.2.2/rq/worker.py0000644000076500000240000012002613611564137015077 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import errno import logging import os import random import signal import socket import sys import time import traceback import warnings from datetime import timedelta from uuid import uuid4 try: from signal import SIGKILL except ImportError: from signal import SIGTERM as SIGKILL from redis import WatchError from . 
import worker_registration from .compat import PY2, as_text, string_types, text_type from .connections import get_current_connection, push_connection, pop_connection from .defaults import (DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL, DEFAULT_JOB_MONITORING_INTERVAL, DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT) from .exceptions import DequeueTimeout, ShutDownImminentException from .job import Job, JobStatus from .logutils import setup_loghandlers from .queue import Queue from .registry import FailedJobRegistry, StartedJobRegistry, clean_registries from .scheduler import RQScheduler from .suspension import is_suspended from .timeouts import JobTimeoutException, HorseMonitorTimeoutException, UnixSignalDeathPenalty from .utils import (backend_class, ensure_list, enum, make_colorizer, utcformat, utcnow, utcparse) from .version import VERSION from .worker_registration import clean_worker_registry, get_keys try: from setproctitle import setproctitle as setprocname except ImportError: def setprocname(*args, **kwargs): # noqa pass green = make_colorizer('darkgreen') yellow = make_colorizer('darkyellow') blue = make_colorizer('darkblue') logger = logging.getLogger(__name__) class StopRequested(Exception): pass def compact(l): return [x for x in l if x is not None] _signames = dict((getattr(signal, signame), signame) for signame in dir(signal) if signame.startswith('SIG') and '_' not in signame) def signal_name(signum): try: if sys.version_info[:2] >= (3, 5): return signal.Signals(signum).name else: return _signames[signum] except KeyError: return 'SIG_UNKNOWN' except ValueError: return 'SIG_UNKNOWN' WorkerStatus = enum( 'WorkerStatus', STARTED='started', SUSPENDED='suspended', BUSY='busy', IDLE='idle' ) class Worker(object): redis_worker_namespace_prefix = 'rq:worker:' redis_workers_keys = worker_registration.REDIS_WORKER_KEYS death_penalty_class = UnixSignalDeathPenalty queue_class = Queue job_class = Job # `log_result_lifespan` controls whether "Result is kept for XXX seconds" # messages are logged after every job, by default they are. log_result_lifespan = True # `log_job_description` is used to toggle logging an entire jobs description. log_job_description = True @classmethod def all(cls, connection=None, job_class=None, queue_class=None, queue=None): """Returns an iterable of all Workers. """ if queue: connection = queue.connection elif connection is None: connection = get_current_connection() worker_keys = get_keys(queue=queue, connection=connection) workers = [cls.find_by_key(as_text(key), connection=connection, job_class=job_class, queue_class=queue_class) for key in worker_keys] return compact(workers) @classmethod def all_keys(cls, connection=None, queue=None): return [as_text(key) for key in get_keys(queue=queue, connection=connection)] @classmethod def count(cls, connection=None, queue=None): """Returns the number of workers by queue or connection""" return len(get_keys(queue=queue, connection=connection)) @classmethod def find_by_key(cls, worker_key, connection=None, job_class=None, queue_class=None): """Returns a Worker instance, based on the naming conventions for naming the internal Redis keys. Can be used to reverse-lookup Workers by their Redis keys. 
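        For example (an illustrative sketch; assumes a worker registered itself
        under the name 'myworker'):

            from redis import Redis
            from rq import Worker

            worker = Worker.find_by_key('rq:worker:myworker', connection=Redis())
            # find_by_key() returns None when no hash exists for that key anymore.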
""" prefix = cls.redis_worker_namespace_prefix if not worker_key.startswith(prefix): raise ValueError('Not a valid RQ worker key: %s' % worker_key) if connection is None: connection = get_current_connection() if not connection.exists(worker_key): connection.srem(cls.redis_workers_keys, worker_key) return None name = worker_key[len(prefix):] worker = cls([], name, connection=connection, job_class=job_class, queue_class=queue_class, prepare_for_work=False) worker.refresh() return worker def __init__(self, queues, name=None, default_result_ttl=DEFAULT_RESULT_TTL, connection=None, exc_handler=None, exception_handlers=None, default_worker_ttl=DEFAULT_WORKER_TTL, job_class=None, queue_class=None, log_job_description=True, job_monitoring_interval=DEFAULT_JOB_MONITORING_INTERVAL, disable_default_exception_handler=False, prepare_for_work=True): # noqa if connection is None: connection = get_current_connection() self.connection = connection if prepare_for_work: self.hostname = socket.gethostname() self.pid = os.getpid() else: self.hostname = None self.pid = None self.job_class = backend_class(self, 'job_class', override=job_class) self.queue_class = backend_class(self, 'queue_class', override=queue_class) self.version = VERSION self.python_version = sys.version queues = [self.queue_class(name=q, connection=connection, job_class=self.job_class) if isinstance(q, string_types) else q for q in ensure_list(queues)] self.name = name or uuid4().hex self.queues = queues self.validate_queues() self._exc_handlers = [] self.default_result_ttl = default_result_ttl self.default_worker_ttl = default_worker_ttl self.job_monitoring_interval = job_monitoring_interval self._state = 'starting' self._is_horse = False self._horse_pid = 0 self._stop_requested = False self.log = logger self.log_job_description = log_job_description self.last_cleaned_at = None self.successful_job_count = 0 self.failed_job_count = 0 self.total_working_time = 0 self.birth_date = None self.scheduler = None self.disable_default_exception_handler = disable_default_exception_handler if isinstance(exception_handlers, list): for handler in exception_handlers: self.push_exc_handler(handler) elif exception_handlers is not None: self.push_exc_handler(exception_handlers) def validate_queues(self): """Sanity check for the given queues.""" for queue in self.queues: if not isinstance(queue, self.queue_class): raise TypeError('{0} is not of type {1} or string types'.format(queue, self.queue_class)) def queue_names(self): """Returns the queue names of this worker's queues.""" return [queue.name for queue in self.queues] def queue_keys(self): """Returns the Redis keys representing this worker's queues.""" return [queue.key for queue in self.queues] @property def key(self): """Returns the worker's Redis hash key.""" return self.redis_worker_namespace_prefix + self.name @property def horse_pid(self): """The horse's process ID. Only available in the worker. Will return 0 in the horse part of the fork. """ return self._horse_pid @property def is_horse(self): """Returns whether or not this is the worker or the work horse.""" return self._is_horse def procline(self, message): """Changes the current procname for the process. This can be used to make `ps -ef` output more readable. 
""" setprocname('rq: {0}'.format(message)) def register_birth(self): """Registers its own birth.""" self.log.debug('Registering birth of worker %s', self.name) if self.connection.exists(self.key) and \ not self.connection.hexists(self.key, 'death'): msg = 'There exists an active worker named {0!r} already' raise ValueError(msg.format(self.name)) key = self.key queues = ','.join(self.queue_names()) with self.connection.pipeline() as p: p.delete(key) now = utcnow() now_in_string = utcformat(now) self.birth_date = now p.hmset(key, { 'birth': now_in_string, 'last_heartbeat': now_in_string, 'queues': queues, 'pid': self.pid, 'hostname': self.hostname, 'version': self.version, 'python_version': self.python_version, }) worker_registration.register(self, p) p.expire(key, self.default_worker_ttl) p.execute() def register_death(self): """Registers its own death.""" self.log.debug('Registering death') with self.connection.pipeline() as p: # We cannot use self.state = 'dead' here, because that would # rollback the pipeline worker_registration.unregister(self, p) p.hset(self.key, 'death', utcformat(utcnow())) p.expire(self.key, 60) p.execute() def set_shutdown_requested_date(self): """Sets the date on which the worker received a (warm) shutdown request""" self.connection.hset(self.key, 'shutdown_requested_date', utcformat(utcnow())) # @property # def birth_date(self): # """Fetches birth date from Redis.""" # birth_timestamp = self.connection.hget(self.key, 'birth') # if birth_timestamp is not None: # return utcparse(as_text(birth_timestamp)) @property def shutdown_requested_date(self): """Fetches shutdown_requested_date from Redis.""" shutdown_requested_timestamp = self.connection.hget(self.key, 'shutdown_requested_date') if shutdown_requested_timestamp is not None: return utcparse(as_text(shutdown_requested_timestamp)) @property def death_date(self): """Fetches death date from Redis.""" death_timestamp = self.connection.hget(self.key, 'death') if death_timestamp is not None: return utcparse(as_text(death_timestamp)) def set_state(self, state, pipeline=None): self._state = state connection = pipeline if pipeline is not None else self.connection connection.hset(self.key, 'state', state) def _set_state(self, state): """Raise a DeprecationWarning if ``worker.state = X`` is used""" warnings.warn( "worker.state is deprecated, use worker.set_state() instead.", DeprecationWarning ) self.set_state(state) def get_state(self): return self._state def _get_state(self): """Raise a DeprecationWarning if ``worker.state == X`` is used""" warnings.warn( "worker.state is deprecated, use worker.get_state() instead.", DeprecationWarning ) return self.get_state() state = property(_get_state, _set_state) def set_current_job_id(self, job_id, pipeline=None): connection = pipeline if pipeline is not None else self.connection if job_id is None: connection.hdel(self.key, 'current_job') else: connection.hset(self.key, 'current_job', job_id) def get_current_job_id(self, pipeline=None): connection = pipeline if pipeline is not None else self.connection return as_text(connection.hget(self.key, 'current_job')) def get_current_job(self): """Returns the job id of the currently executing job.""" job_id = self.get_current_job_id() if job_id is None: return None return self.job_class.fetch(job_id, self.connection) def _install_signal_handlers(self): """Installs signal handlers for handling SIGINT and SIGTERM gracefully. 
""" signal.signal(signal.SIGINT, self.request_stop) signal.signal(signal.SIGTERM, self.request_stop) def kill_horse(self, sig=SIGKILL): """ Kill the horse but catch "No such process" error has the horse could already be dead. """ try: os.kill(self.horse_pid, sig) except OSError as e: if e.errno == errno.ESRCH: # "No such process" is fine with us self.log.debug('Horse already dead') else: raise def request_force_stop(self, signum, frame): """Terminates the application (cold shutdown). """ self.log.warning('Cold shut down') # Take down the horse with the worker if self.horse_pid: self.log.debug('Taking down horse %s with me', self.horse_pid) self.kill_horse() raise SystemExit() def request_stop(self, signum, frame): """Stops the current worker loop but waits for child processes to end gracefully (warm shutdown). """ self.log.debug('Got signal %s', signal_name(signum)) signal.signal(signal.SIGINT, self.request_force_stop) signal.signal(signal.SIGTERM, self.request_force_stop) self.handle_warm_shutdown_request() # If shutdown is requested in the middle of a job, wait until # finish before shutting down and save the request in redis if self.get_state() == WorkerStatus.BUSY: self._stop_requested = True self.set_shutdown_requested_date() self.log.debug('Stopping after current horse is finished. ' 'Press Ctrl+C again for a cold shutdown.') if self.scheduler: self.stop_scheduler() else: if self.scheduler: self.stop_scheduler() raise StopRequested() def handle_warm_shutdown_request(self): self.log.info('Warm shut down requested') def check_for_suspension(self, burst): """Check to see if workers have been suspended by `rq suspend`""" before_state = None notified = False while not self._stop_requested and is_suspended(self.connection, self): if burst: self.log.info('Suspended in burst mode, exiting') self.log.info('Note: There could still be unfinished jobs on the queue') raise StopRequested if not notified: self.log.info('Worker suspended, run `rq resume` to resume') before_state = self.get_state() self.set_state(WorkerStatus.SUSPENDED) notified = True time.sleep(1) if before_state: self.set_state(before_state) def run_maintenance_tasks(self): """ Runs periodic maintenance tasks, these include: 1. Check if scheduler should be started. This check should not be run on first run since worker.work() already calls `scheduler.enqueue_scheduled_jobs()` on startup. 2. Cleaning registries """ # No need to try to start scheduler on first run if self.last_cleaned_at: if self.scheduler and not self.scheduler._process: self.scheduler.acquire_locks(auto_start=True) self.clean_registries() def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT, log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False): """Starts the work loop. Pops and performs all jobs on the current list of queues. When all queues are empty, block and wait for new jobs to arrive on any of the queues, unless `burst` mode is enabled. The return value indicates whether any jobs were processed. 
""" setup_loghandlers(logging_level, date_format, log_format) completed_jobs = 0 self.register_birth() self.log.info("Worker %s: started, version %s", self.key, VERSION) self.set_state(WorkerStatus.STARTED) qnames = self.queue_names() self.log.info('*** Listening on %s...', green(', '.join(qnames))) if with_scheduler: self.scheduler = RQScheduler(self.queues, connection=self.connection) self.scheduler.acquire_locks() # If lock is acquired, start scheduler if self.scheduler.acquired_locks: # If worker is run on burst mode, enqueue_scheduled_jobs() # before working. Otherwise, start scheduler in a separate process if burst: self.scheduler.enqueue_scheduled_jobs() else: self.scheduler.start() self._install_signal_handlers() try: while True: try: self.check_for_suspension(burst) if self.should_run_maintenance_tasks: self.run_maintenance_tasks() if self._stop_requested: self.log.info('Worker %s: stopping on request', self.key) break timeout = None if burst else max(1, self.default_worker_ttl - 15) result = self.dequeue_job_and_maintain_ttl(timeout) if result is None: if burst: self.log.info("Worker %s: done, quitting", self.key) break job, queue = result self.execute_job(job, queue) self.heartbeat() completed_jobs += 1 if max_jobs is not None: if completed_jobs >= max_jobs: self.log.info( "Worker %s: finished executing %d jobs, quitting", self.key, completed_jobs ) break except StopRequested: break except SystemExit: # Cold shutdown detected raise except: # noqa self.log.error( 'Worker %s: found an unhandled exception, quitting...', self.key, exc_info=True ) break finally: if not self.is_horse: if self.scheduler: self.stop_scheduler() self.register_death() return bool(completed_jobs) def stop_scheduler(self): """Ensure scheduler process is stopped""" if self.scheduler._process and self.scheduler._process.pid: # Send the kill signal to scheduler process try: os.kill(self.scheduler._process.pid, signal.SIGTERM) except OSError: pass self.scheduler._process.join() def dequeue_job_and_maintain_ttl(self, timeout): result = None qnames = ','.join(self.queue_names()) self.set_state(WorkerStatus.IDLE) self.procline('Listening on ' + qnames) self.log.debug('*** Listening on %s...', green(qnames)) while True: self.heartbeat() if self.should_run_maintenance_tasks: self.run_maintenance_tasks() try: result = self.queue_class.dequeue_any(self.queues, timeout, connection=self.connection, job_class=self.job_class) if result is not None: job, queue = result if self.log_job_description: self.log.info( '%s: %s (%s)', green(queue.name), blue(job.description), job.id) else: self.log.info('%s: %s', green(queue.name), job.id) break except DequeueTimeout: pass self.heartbeat() return result def heartbeat(self, timeout=None, pipeline=None): """Specifies a new worker timeout, typically by extending the expiration time of the worker, effectively making this a "heartbeat" to not expire the worker until the timeout passes. The next heartbeat should come before this time, or the worker will die (at least from the monitoring dashboards). If no timeout is given, the default_worker_ttl will be used to update the expiration time of the worker. """ timeout = timeout or self.default_worker_ttl connection = pipeline if pipeline is not None else self.connection connection.expire(self.key, timeout) connection.hset(self.key, 'last_heartbeat', utcformat(utcnow())) self.log.debug('Sent heartbeat to prevent worker timeout. 
' 'Next one should arrive within %s seconds.', timeout) def refresh(self): data = self.connection.hmget( self.key, 'queues', 'state', 'current_job', 'last_heartbeat', 'birth', 'failed_job_count', 'successful_job_count', 'total_working_time', 'hostname', 'pid', 'version', 'python_version', ) (queues, state, job_id, last_heartbeat, birth, failed_job_count, successful_job_count, total_working_time, hostname, pid, version, python_version) = data queues = as_text(queues) self.hostname = hostname self.pid = int(pid) if pid else None self.version = as_text(version) self.python_version = as_text(python_version) self._state = as_text(state or '?') self._job_id = job_id or None if last_heartbeat: self.last_heartbeat = utcparse(as_text(last_heartbeat)) else: self.last_heartbeat = None if birth: self.birth_date = utcparse(as_text(birth)) else: self.birth_date = None if failed_job_count: self.failed_job_count = int(as_text(failed_job_count)) if successful_job_count: self.successful_job_count = int(as_text(successful_job_count)) if total_working_time: self.total_working_time = float(as_text(total_working_time)) if queues: self.queues = [self.queue_class(queue, connection=self.connection, job_class=self.job_class) for queue in queues.split(',')] def increment_failed_job_count(self, pipeline=None): connection = pipeline if pipeline is not None else self.connection connection.hincrby(self.key, 'failed_job_count', 1) def increment_successful_job_count(self, pipeline=None): connection = pipeline if pipeline is not None else self.connection connection.hincrby(self.key, 'successful_job_count', 1) def increment_total_working_time(self, job_execution_time, pipeline): pipeline.hincrbyfloat(self.key, 'total_working_time', job_execution_time.total_seconds()) def fork_work_horse(self, job, queue): """Spawns a work horse to perform the actual work and passes it a job. """ child_pid = os.fork() os.environ['RQ_WORKER_ID'] = self.name os.environ['RQ_JOB_ID'] = job.id if child_pid == 0: self.main_work_horse(job, queue) else: self._horse_pid = child_pid self.procline('Forked {0} at {1}'.format(child_pid, time.time())) def monitor_work_horse(self, job): """The worker will monitor the work horse and make sure that it either executes successfully or the status of the job is set to failed """ ret_val = None job.started_at = job.started_at or utcnow() while True: try: with UnixSignalDeathPenalty(self.job_monitoring_interval, HorseMonitorTimeoutException): retpid, ret_val = os.waitpid(self._horse_pid, 0) break except HorseMonitorTimeoutException: # Horse has not exited yet and is still running. # Send a heartbeat to keep the worker alive. self.heartbeat(self.job_monitoring_interval + 5) # Kill the job from this side if something is really wrong (interpreter lock/etc). if (utcnow() - job.started_at).total_seconds() > (job.timeout + 1): self.kill_horse() break except OSError as e: # In case we encountered an OSError due to EINTR (which is # caused by a SIGINT or SIGTERM signal during # os.waitpid()), we simply ignore it and enter the next # iteration of the loop, waiting for the child to end. In # any other case, this is some other unexpected OS error, # which we don't want to catch, so we re-raise those ones. if e.errno != errno.EINTR: raise # Send a heartbeat to keep the worker alive. self.heartbeat() if ret_val == os.EX_OK: # The process exited normally. 
return job_status = job.get_status() if job_status is None: # Job completed and its ttl has expired return if job_status not in [JobStatus.FINISHED, JobStatus.FAILED]: if not job.ended_at: job.ended_at = utcnow() # Unhandled failure: move the job to the failed queue self.log.warning(( 'Moving job to FailedJobRegistry ' '(work-horse terminated unexpectedly; waitpid returned {})' ).format(ret_val)) self.handle_job_failure( job, exc_string="Work-horse process was terminated unexpectedly " "(waitpid returned %s)" % ret_val ) def execute_job(self, job, queue): """Spawns a work horse to perform the actual work and passes it a job. The worker will wait for the work horse and make sure it executes within the given timeout bounds, or will end the work horse with SIGALRM. """ self.set_state(WorkerStatus.BUSY) self.fork_work_horse(job, queue) self.monitor_work_horse(job) self.set_state(WorkerStatus.IDLE) def main_work_horse(self, job, queue): """This is the entry point of the newly spawned work horse.""" # After fork()'ing, always assure we are generating random sequences # that are different from the worker. random.seed() try: self.setup_work_horse_signals() self._is_horse = True self.log = logger self.perform_job(job, queue) except Exception as e: # noqa # Horse does not terminate properly raise e os._exit(1) # os._exit() is the way to exit from childs after a fork(), in # constrast to the regular sys.exit() os._exit(0) def setup_work_horse_signals(self): """Setup signal handing for the newly spawned work horse.""" # Always ignore Ctrl+C in the work horse, as it might abort the # currently running job. # The main worker catches the Ctrl+C and requests graceful shutdown # after the current work is done. When cold shutdown is requested, it # kills the current job anyway. signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_DFL) def prepare_job_execution(self, job, heartbeat_ttl=None): """Performs misc bookkeeping like updating states prior to job execution. """ if job.timeout == -1: timeout = -1 else: timeout = job.timeout or 180 if heartbeat_ttl is None: heartbeat_ttl = self.job_monitoring_interval + 5 with self.connection.pipeline() as pipeline: self.set_state(WorkerStatus.BUSY, pipeline=pipeline) self.set_current_job_id(job.id, pipeline=pipeline) self.heartbeat(heartbeat_ttl, pipeline=pipeline) registry = StartedJobRegistry(job.origin, self.connection, job_class=self.job_class) registry.add(job, timeout, pipeline=pipeline) job.set_status(JobStatus.STARTED, pipeline=pipeline) pipeline.hset(job.key, 'started_at', utcformat(utcnow())) pipeline.execute() msg = 'Processing {0} from {1} since {2}' self.procline(msg.format(job.func_name, job.origin, time.time())) def handle_job_failure(self, job, started_job_registry=None, exc_string=''): """Handles the failure or an executing job by: 1. Setting the job status to failed 2. Removing the job from StartedJobRegistry 3. Setting the workers current job to None 4. 
Add the job to FailedJobRegistry """ self.log.debug('Handling failed execution of job %s', job.id) with self.connection.pipeline() as pipeline: if started_job_registry is None: started_job_registry = StartedJobRegistry( job.origin, self.connection, job_class=self.job_class ) job.set_status(JobStatus.FAILED, pipeline=pipeline) started_job_registry.remove(job, pipeline=pipeline) if not self.disable_default_exception_handler: failed_job_registry = FailedJobRegistry(job.origin, job.connection, job_class=self.job_class) failed_job_registry.add(job, ttl=job.failure_ttl, exc_string=exc_string, pipeline=pipeline) self.set_current_job_id(None, pipeline=pipeline) self.increment_failed_job_count(pipeline) if job.started_at and job.ended_at: self.increment_total_working_time( job.ended_at - job.started_at, pipeline ) try: pipeline.execute() except Exception: # Ensure that custom exception handlers are called # even if Redis is down pass def handle_job_success(self, job, queue, started_job_registry): self.log.debug('Handling successful execution of job %s', job.id) with self.connection.pipeline() as pipeline: while True: try: # if dependencies are inserted after enqueue_dependents # a WatchError is thrown by execute() pipeline.watch(job.dependents_key) # enqueue_dependents calls multi() on the pipeline! queue.enqueue_dependents(job, pipeline=pipeline) self.set_current_job_id(None, pipeline=pipeline) self.increment_successful_job_count(pipeline=pipeline) self.increment_total_working_time( job.ended_at - job.started_at, pipeline ) result_ttl = job.get_result_ttl(self.default_result_ttl) if result_ttl != 0: job.set_status(JobStatus.FINISHED, pipeline=pipeline) # Don't clobber the user's meta dictionary! job.save(pipeline=pipeline, include_meta=False) finished_job_registry = queue.finished_job_registry finished_job_registry.add(job, result_ttl, pipeline) job.cleanup(result_ttl, pipeline=pipeline, remove_from_queue=False) started_job_registry.remove(job, pipeline=pipeline) pipeline.execute() break except WatchError: continue def perform_job(self, job, queue, heartbeat_ttl=None): """Performs the actual work of a job. Will/should only be called inside the work horse's process. 
""" self.prepare_job_execution(job, heartbeat_ttl) push_connection(self.connection) started_job_registry = queue.started_job_registry try: job.started_at = utcnow() timeout = job.timeout or self.queue_class.DEFAULT_TIMEOUT with self.death_penalty_class(timeout, JobTimeoutException, job_id=job.id): rv = job.perform() job.ended_at = utcnow() # Pickle the result in the same try-except block since we need # to use the same exc handling when pickling fails job._result = rv self.handle_job_success(job=job, queue=queue, started_job_registry=started_job_registry) except: # NOQA job.ended_at = utcnow() exc_info = sys.exc_info() exc_string = self._get_safe_exception_string( traceback.format_exception(*exc_info) ) self.handle_job_failure(job=job, exc_string=exc_string, started_job_registry=started_job_registry) self.handle_exception(job, *exc_info) return False finally: pop_connection() self.log.info('%s: %s (%s)', green(job.origin), blue('Job OK'), job.id) if rv is not None: log_result = "{0!r}".format(as_text(text_type(rv))) self.log.debug('Result: %s', yellow(log_result)) if self.log_result_lifespan: result_ttl = job.get_result_ttl(self.default_result_ttl) if result_ttl == 0: self.log.info('Result discarded immediately') elif result_ttl > 0: self.log.info('Result is kept for %s seconds', result_ttl) else: self.log.info('Result will never expire, clean up result key manually') return True def handle_exception(self, job, *exc_info): """Walks the exception handler stack to delegate exception handling.""" exc_string = Worker._get_safe_exception_string( traceback.format_exception_only(*exc_info[:2]) + traceback.format_exception(*exc_info) ) self.log.error(exc_string, exc_info=True, extra={ 'func': job.func_name, 'arguments': job.args, 'kwargs': job.kwargs, 'queue': job.origin, }) for handler in self._exc_handlers: self.log.debug('Invoking exception handler %s', handler) fallthrough = handler(job, *exc_info) # Only handlers with explicit return values should disable further # exc handling, so interpret a None return value as True. if fallthrough is None: fallthrough = True if not fallthrough: break @staticmethod def _get_safe_exception_string(exc_strings): """Ensure list of exception strings is decoded on Python 2 and joined as one string safely.""" if sys.version_info[0] < 3: try: exc_strings = [exc.decode("utf-8") for exc in exc_strings] except ValueError: exc_strings = [exc.decode("latin-1") for exc in exc_strings] return ''.join(exc_strings) def push_exc_handler(self, handler_func): """Pushes an exception handler onto the exc handler stack.""" self._exc_handlers.append(handler_func) def pop_exc_handler(self): """Pops the latest exception handler off of the exc handler stack.""" return self._exc_handlers.pop() def __eq__(self, other): """Equality does not take the database/connection into account""" if not isinstance(other, self.__class__): raise TypeError('Cannot compare workers to other types (of workers)') return self.name == other.name def __hash__(self): """The hash does not take the database/connection into account""" return hash(self.name) def clean_registries(self): """Runs maintenance jobs on each Queue's registries.""" for queue in self.queues: # If there are multiple workers running, we only want 1 worker # to run clean_registries(). 
if queue.acquire_cleaning_lock(): self.log.info('Cleaning registries for queue: %s', queue.name) clean_registries(queue) clean_worker_registry(queue) self.last_cleaned_at = utcnow() @property def should_run_maintenance_tasks(self): """Maintenance tasks should run on first startup or 15 minutes.""" if self.last_cleaned_at is None: return True if (utcnow() - self.last_cleaned_at) > timedelta(minutes=15): return True return False class SimpleWorker(Worker): def main_work_horse(self, *args, **kwargs): raise NotImplementedError("Test worker does not implement this method") def execute_job(self, job, queue): """Execute job in same thread/process, do not fork()""" timeout = (job.timeout or DEFAULT_WORKER_TTL) + 5 return self.perform_job(job, queue, heartbeat_ttl=timeout) class HerokuWorker(Worker): """ Modified version of rq worker which: * stops work horses getting killed with SIGTERM * sends SIGRTMIN to work horses on SIGTERM to the main process which in turn causes the horse to crash `imminent_shutdown_delay` seconds later """ imminent_shutdown_delay = 6 frame_properties = ['f_code', 'f_lasti', 'f_lineno', 'f_locals', 'f_trace'] if PY2: frame_properties.extend( ['f_exc_traceback', 'f_exc_type', 'f_exc_value', 'f_restricted'] ) def setup_work_horse_signals(self): """Modified to ignore SIGINT and SIGTERM and only handle SIGRTMIN""" signal.signal(signal.SIGRTMIN, self.request_stop_sigrtmin) signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN) def handle_warm_shutdown_request(self): """If horse is alive send it SIGRTMIN""" if self.horse_pid != 0: self.log.info( 'Worker %s: warm shut down requested, sending horse SIGRTMIN signal', self.key ) self.kill_horse(sig=signal.SIGRTMIN) else: self.log.warning('Warm shut down requested, no horse found') def request_stop_sigrtmin(self, signum, frame): if self.imminent_shutdown_delay == 0: self.log.warning('Imminent shutdown, raising ShutDownImminentException immediately') self.request_force_stop_sigrtmin(signum, frame) else: self.log.warning('Imminent shutdown, raising ShutDownImminentException in %d seconds', self.imminent_shutdown_delay) signal.signal(signal.SIGRTMIN, self.request_force_stop_sigrtmin) signal.signal(signal.SIGALRM, self.request_force_stop_sigrtmin) signal.alarm(self.imminent_shutdown_delay) def request_force_stop_sigrtmin(self, signum, frame): info = dict((attr, getattr(frame, attr)) for attr in self.frame_properties) self.log.warning('raising ShutDownImminentException to cancel job...') raise ShutDownImminentException('shut down imminent (signal: %s)' % signal_name(signum), info) rq-1.2.2/rq/compat/0000755000076500000240000000000013615036417014475 5ustar selwinstaff00000000000000rq-1.2.2/rq/compat/connections.py0000644000076500000240000000076513566232513017401 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from functools import partial from redis import Redis def fix_return_type(func): # deliberately no functools.wraps() call here, since the function being # wrapped is a partial, which has no module def _inner(*args, **kwargs): value = func(*args, **kwargs) if value is None: value = -1 return value return _inner rq-1.2.2/rq/compat/__init__.py0000644000076500000240000000636313604001573016607 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import sys def is_python_version(*versions): for version in versions: if 
(sys.version_info[0] == version[0] and sys.version_info >= version): return True return False try: from functools import total_ordering except ImportError: def total_ordering(cls): # noqa """Class decorator that fills in missing ordering methods""" convert = { '__lt__': [('__gt__', lambda self, other: other < self), ('__le__', lambda self, other: not other < self), ('__ge__', lambda self, other: not self < other)], '__le__': [('__ge__', lambda self, other: other <= self), ('__lt__', lambda self, other: not other <= self), ('__gt__', lambda self, other: not self <= other)], '__gt__': [('__lt__', lambda self, other: other > self), ('__ge__', lambda self, other: not other > self), ('__le__', lambda self, other: not self > other)], '__ge__': [('__le__', lambda self, other: other >= self), ('__gt__', lambda self, other: not other >= self), ('__lt__', lambda self, other: not self >= other)] } roots = set(dir(cls)) & set(convert) if not roots: raise ValueError('must define at least one ordering operation: < > <= >=') # noqa root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__ for opname, opfunc in convert[root]: if opname not in roots: opfunc.__name__ = str(opname) opfunc.__doc__ = getattr(int, opname).__doc__ setattr(cls, opname, opfunc) return cls PY2 = sys.version_info[0] == 2 if not PY2: # Python 3.x and up text_type = str string_types = (str,) def as_text(v): if v is None: return None elif isinstance(v, bytes): return v.decode('utf-8') elif isinstance(v, str): return v else: raise ValueError('Unknown type %r' % type(v)) def decode_redis_hash(h): return dict((as_text(k), h[k]) for k in h) else: # Python 2.x def text_type(v): try: return unicode(v) # noqa except Exception: return unicode(v, "utf-8", errors="ignore") # noqa string_types = (str, unicode) # noqa def as_text(v): if v is None: return None elif isinstance(v, str): return v.decode('utf-8') elif isinstance(v, unicode): # noqa return v else: raise Exception("Input cannot be decoded into literal thing.") def decode_redis_hash(h): return h try: from datetime import timezone utc = timezone.utc except ImportError: # Python 2.x workaround from datetime import timedelta, tzinfo class UTC(tzinfo): def utcoffset(self, dt): return timedelta(0) def tzname(self, dt): return "UTC" def dst(self, dt): return timedelta(0) utc = UTC()rq-1.2.2/rq/compat/dictconfig.py0000644000076500000240000005451513566232513017172 0ustar selwinstaff00000000000000# flake8: noqa # This is a copy of the Python logging.config.dictconfig module. It is # provided here for backwards compatibility for Python versions prior to 2.7. # # Copyright 2009-2010 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import logging.handlers import re import sys import types from rq.compat import string_types IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True # # This function is defined in logging only in recent versions of Python # try: from logging import _checkLevel except ImportError: def _checkLevel(level): if isinstance(level, int): rv = level elif str(level) == level: if level not in logging._levelNames: raise ValueError('Unknown level: %r' % level) rv = logging._levelNames[level] else: raise TypeError('Level not an integer or a ' 'valid string: %r' % level) return rv # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. The # conversion converts base dicts, lists and tuples to their wrapped # equivalents, whereas strings which match a conversion format are converted # appropriately. # # Each wrapper should have a configurator attribute holding the actual # configurator to use for conversion. class ConvertingDict(dict): """A converting dictionary wrapper.""" def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, key, default=None): value = dict.pop(self, key, default) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class ConvertingList(list): """A converting list wrapper.""" def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) #If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result def pop(self, idx=-1): value = list.pop(self, idx) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self return result class ConvertingTuple(tuple): """A converting tuple wrapper.""" def __getitem__(self, key): value = tuple.__getitem__(self, key) result = self.configurator.convert(value) if value is not result: if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): result.parent = self result.key = key return result class BaseConfigurator(object): """ The configurator base class which defines some useful defaults. 
""" CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { 'ext' : 'ext_convert', 'cfg' : 'cfg_convert', } # We might want to use a different one, e.g. importlib importer = __import__ def __init__(self, config): self.config = ConvertingDict(config) self.config.configurator = self def resolve(self, s): """ Resolve strings to objects using standard import and attribute syntax. """ name = s.split('.') used = name.pop(0) try: found = self.importer(used) for frag in name: used += '.' + frag try: found = getattr(found, frag) except AttributeError: self.importer(used) found = getattr(found, frag) return found except ImportError: e, tb = sys.exc_info()[1:] v = ValueError('Cannot resolve %r: %s' % (s, e)) v.__cause__, v.__traceback__ = e, tb raise v def ext_convert(self, value): """Default converter for the ext:// protocol.""" return self.resolve(value) def cfg_convert(self, value): """Default converter for the cfg:// protocol.""" rest = value m = self.WORD_PATTERN.match(rest) if m is None: raise ValueError("Unable to convert %r" % value) else: rest = rest[m.end():] d = self.config[m.groups()[0]] #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: d = d[m.groups()[0]] else: m = self.INDEX_PATTERN.match(rest) if m: idx = m.groups()[0] if not self.DIGIT_PATTERN.match(idx): d = d[idx] else: try: n = int(idx) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] if m: rest = rest[m.end():] else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) #rest should be empty return d def convert(self, value): """ Convert values to an appropriate type. dicts, lists and tuples are replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. """ if not isinstance(value, ConvertingDict) and isinstance(value, dict): value = ConvertingDict(value) value.configurator = self elif not isinstance(value, ConvertingList) and isinstance(value, list): value = ConvertingList(value) value.configurator = self elif not isinstance(value, ConvertingTuple) and\ isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): # str for py3k m = self.CONVERT_PATTERN.match(value) if m: d = m.groupdict() prefix = d['prefix'] converter = self.value_converters.get(prefix, None) if converter: suffix = d['suffix'] converter = getattr(self, converter) value = converter(suffix) return value def configure_custom(self, config): """Configure an object with a user-supplied factory.""" c = config.pop('()') if not hasattr(c, '__call__') and type(c) != type: c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) result = c(**kwargs) if props: for name, value in props.items(): setattr(result, name, value) return result def as_tuple(self, value): """Utility function which converts lists to tuples.""" if isinstance(value, list): value = tuple(value) return value class DictConfigurator(BaseConfigurator): """ Configure logging using a dictionary-like object to describe the configuration. 
""" def configure(self): """Do the configuration.""" config = self.config if 'version' not in config: raise ValueError("dictionary doesn't specify a version") if config['version'] != 1: raise ValueError("Unsupported version: %s" % config['version']) incremental = config.pop('incremental', False) EMPTY_DICT = {} logging._acquireLock() try: if incremental: handlers = config.get('handlers', EMPTY_DICT) # incremental handler config only if handler name # ties in to logging._handlers (Python 2.7) if sys.version_info[:2] == (2, 7): for name in handlers: if name not in logging._handlers: raise ValueError('No handler found with ' 'name %r' % name) else: try: handler = logging._handlers[name] handler_config = handlers[name] level = handler_config.get('level', None) if level: handler.setLevel(_checkLevel(level)) except Exception as e: raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) loggers = config.get('loggers', EMPTY_DICT) for name in loggers: try: self.configure_logger(name, loggers[name], True) except Exception as e: raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) root = config.get('root', None) if root: try: self.configure_root(root, True) except Exception as e: raise ValueError('Unable to configure root ' 'logger: %s' % e) else: disable_existing = config.pop('disable_existing_loggers', True) logging._handlers.clear() del logging._handlerList[:] # Do formatters first - they don't refer to anything else formatters = config.get('formatters', EMPTY_DICT) for name in formatters: try: formatters[name] = self.configure_formatter( formatters[name]) except Exception as e: raise ValueError('Unable to configure ' 'formatter %r: %s' % (name, e)) # Next, do filters - they don't refer to anything else, either filters = config.get('filters', EMPTY_DICT) for name in filters: try: filters[name] = self.configure_filter(filters[name]) except Exception as e: raise ValueError('Unable to configure ' 'filter %r: %s' % (name, e)) # Next, do handlers - they refer to formatters and filters # As handlers can refer to other handlers, sort the keys # to allow a deterministic order of configuration handlers = config.get('handlers', EMPTY_DICT) for name in sorted(handlers): try: handler = self.configure_handler(handlers[name]) handler.name = name handlers[name] = handler except Exception as e: raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) # Next, do loggers - they refer to handlers and filters #we don't want to lose the existing loggers, #since other threads may have pointers to them. #existing is set to contain all existing loggers, #and as we go through the new configuration we #remove any which are configured. At the end, #what's left in existing is the set of loggers #which were in the previous configuration but #which are not in the new configuration. root = logging.root existing = root.manager.loggerDict.keys() #The list needs to be sorted so that we can #avoid disabling child loggers of explicitly #named loggers. With a sorted list it is easier #to find the child loggers. existing.sort() #We'll keep the list of existing loggers #which are children of named loggers here... child_loggers = [] #now set up the new ones... loggers = config.get('loggers', EMPTY_DICT) for name in loggers: if name in existing: i = existing.index(name) prefixed = name + "." 
pflen = len(prefixed) num_existing = len(existing) i = i + 1 # look at the entry after name while (i < num_existing) and\ (existing[i][:pflen] == prefixed): child_loggers.append(existing[i]) i = i + 1 existing.remove(name) try: self.configure_logger(name, loggers[name]) except Exception as e: raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) #Disable any old loggers. There's no point deleting #them as other threads may continue to hold references #and by disabling them, you stop them doing any logging. #However, don't disable children of named loggers, as that's #probably not what was intended by the user. for log in existing: logger = root.manager.loggerDict[log] if log in child_loggers: logger.level = logging.NOTSET logger.handlers = [] logger.propagate = True elif disable_existing: logger.disabled = True # And finally, do the root logger root = config.get('root', None) if root: try: self.configure_root(root) except Exception as e: raise ValueError('Unable to configure root ' 'logger: %s' % e) finally: logging._releaseLock() def configure_formatter(self, config): """Configure a formatter from a dictionary.""" if '()' in config: factory = config['()'] # for use in exception handler try: result = self.configure_custom(config) except TypeError as te: if "'format'" not in str(te): raise #Name of parameter changed from fmt to format. #Retry with old name. #This is so that code can be used with older Python versions #(e.g. by Django) config['fmt'] = config.pop('format') config['()'] = factory result = self.configure_custom(config) else: fmt = config.get('format', None) dfmt = config.get('datefmt', None) result = logging.Formatter(fmt, dfmt) return result def configure_filter(self, config): """Configure a filter from a dictionary.""" if '()' in config: result = self.configure_custom(config) else: name = config.get('name', '') result = logging.Filter(name) return result def add_filters(self, filterer, filters): """Add filters to a filterer from a list of names.""" for f in filters: try: filterer.addFilter(self.config['filters'][f]) except Exception as e: raise ValueError('Unable to add filter %r: %s' % (f, e)) def configure_handler(self, config): """Configure a handler from a dictionary.""" formatter = config.pop('formatter', None) if formatter: try: formatter = self.config['formatters'][formatter] except Exception as e: raise ValueError('Unable to set formatter ' '%r: %s' % (formatter, e)) level = config.pop('level', None) filters = config.pop('filters', None) if '()' in config: c = config.pop('()') if not hasattr(c, '__call__') and type(c) != type: c = self.resolve(c) factory = c else: klass = self.resolve(config.pop('class')) #Special case for handler which refers to another handler if issubclass(klass, logging.handlers.MemoryHandler) and\ 'target' in config: try: config['target'] = self.config['handlers'][config['target']] except Exception as e: raise ValueError('Unable to set target handler ' '%r: %s' % (config['target'], e)) elif issubclass(klass, logging.handlers.SMTPHandler) and\ 'mailhost' in config: config['mailhost'] = self.as_tuple(config['mailhost']) elif issubclass(klass, logging.handlers.SysLogHandler) and\ 'address' in config: config['address'] = self.as_tuple(config['address']) factory = klass kwargs = dict([(str(k), config[k]) for k in config if valid_ident(k)]) try: result = factory(**kwargs) except TypeError as te: if "'stream'" not in str(te): raise #The argument name changed from strm to stream #Retry with old name. 
#This is so that code can be used with older Python versions #(e.g. by Django) kwargs['strm'] = kwargs.pop('stream') result = factory(**kwargs) if formatter: result.setFormatter(formatter) if level is not None: result.setLevel(_checkLevel(level)) if filters: self.add_filters(result, filters) return result def add_handlers(self, logger, handlers): """Add handlers to a logger from a list of names.""" for h in handlers: try: logger.addHandler(self.config['handlers'][h]) except Exception as e: raise ValueError('Unable to add handler %r: %s' % (h, e)) def common_logger_config(self, logger, config, incremental=False): """ Perform configuration which is common to root and non-root loggers. """ level = config.get('level', None) if level is not None: logger.setLevel(_checkLevel(level)) if not incremental: #Remove any existing handlers for h in logger.handlers[:]: logger.removeHandler(h) handlers = config.get('handlers', None) if handlers: self.add_handlers(logger, handlers) filters = config.get('filters', None) if filters: self.add_filters(logger, filters) def configure_logger(self, name, config, incremental=False): """Configure a non-root logger from a dictionary.""" logger = logging.getLogger(name) self.common_logger_config(logger, config, incremental) propagate = config.get('propagate', None) if propagate is not None: logger.propagate = propagate def configure_root(self, config, incremental=False): """Configure a root logger from a dictionary.""" root = logging.getLogger() self.common_logger_config(root, config, incremental) dictConfigClass = DictConfigurator def dictConfig(config): """Configure logging using a dictionary.""" dictConfigClass(config).configure() rq-1.2.2/rq/version.py0000644000076500000240000000023013615036226015242 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) VERSION = '1.2.2' rq-1.2.2/rq/registry.py0000644000076500000240000002667313611571626015454 0ustar selwinstaff00000000000000import calendar import time from datetime import datetime, timedelta from .compat import as_text, utc from .connections import resolve_connection from .defaults import DEFAULT_FAILURE_TTL from .exceptions import InvalidJobOperation, NoSuchJobError from .job import Job, JobStatus from .queue import Queue from .utils import backend_class, current_timestamp class BaseRegistry(object): """ Base implementation of a job registry, implemented in Redis sorted set. Each job is stored as a key in the registry, scored by expiration time (unix timestamp). """ job_class = Job key_template = 'rq:registry:{0}' def __init__(self, name='default', connection=None, job_class=None, queue=None): if queue: self.name = queue.name self.connection = resolve_connection(queue.connection) else: self.name = name self.connection = resolve_connection(connection) self.key = self.key_template.format(self.name) self.job_class = backend_class(self, 'job_class', override=job_class) def __len__(self): """Returns the number of jobs in this registry""" return self.count def __eq__(self, other): return (self.name == other.name and self.connection == other.connection) def __contains__(self, item): """ Returns a boolean indicating registry contains the given job instance or job id. 
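A short sketch of the registry helpers defined here, using the finished-job registry as an example (queue name and Redis defaults are illustrative):

from redis import Redis
from rq import Queue
from rq.registry import FinishedJobRegistry

redis = Redis()
queue = Queue('default', connection=redis)
registry = FinishedJobRegistry(queue=queue)

job = queue.enqueue('os.getpid')
print(job in registry)          # False until a worker finishes the job
print(job.id in registry)       # __contains__ accepts a Job instance or a job id
print(registry.get_job_ids())   # all ids currently in the sorted set
print(len(registry))            # count; expired entries are cleaned up first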
""" job_id = item if isinstance(item, self.job_class): job_id = item.id return self.connection.zscore(self.key, job_id) is not None @property def count(self): """Returns the number of jobs in this registry""" self.cleanup() return self.connection.zcard(self.key) def add(self, job, ttl=0, pipeline=None): """Adds a job to a registry with expiry time of now + ttl, unless it's -1 which is set to +inf""" score = ttl if ttl < 0 else current_timestamp() + ttl if score == -1: score = '+inf' if pipeline is not None: return pipeline.zadd(self.key, {job.id: score}) return self.connection.zadd(self.key, {job.id: score}) def remove(self, job, pipeline=None, delete_job=False): """Removes job from registry and deletes it if `delete_job == True`""" connection = pipeline if pipeline is not None else self.connection job_id = job.id if isinstance(job, self.job_class) else job result = connection.zrem(self.key, job_id) if delete_job: if isinstance(job, self.job_class): job_instance = job else: job_instance = Job.fetch(job_id, connection=connection) job_instance.delete() return result def get_expired_job_ids(self, timestamp=None): """Returns job ids whose score are less than current timestamp. Returns ids for jobs with an expiry time earlier than timestamp, specified as seconds since the Unix epoch. timestamp defaults to call time if unspecified. """ score = timestamp if timestamp is not None else current_timestamp() return [as_text(job_id) for job_id in self.connection.zrangebyscore(self.key, 0, score)] def get_job_ids(self, start=0, end=-1): """Returns list of all job ids.""" self.cleanup() return [as_text(job_id) for job_id in self.connection.zrange(self.key, start, end)] def get_queue(self): """Returns Queue object associated with this registry.""" return Queue(self.name, connection=self.connection) def get_expiration_time(self, job): """Returns job's expiration time.""" score = self.connection.zscore(self.key, job.id) return datetime.utcfromtimestamp(score) class StartedJobRegistry(BaseRegistry): """ Registry of currently executing jobs. Each queue maintains a StartedJobRegistry. Jobs in this registry are ones that are currently being executed. Jobs are added to registry right before they are executed and removed right after completion (success or failure). """ key_template = 'rq:wip:{0}' def cleanup(self, timestamp=None): """Remove expired jobs from registry and add them to FailedJobRegistry. Removes jobs with an expiry time earlier than timestamp, specified as seconds since the Unix epoch. timestamp defaults to call time if unspecified. Removed jobs are added to the global failed job queue. """ score = timestamp if timestamp is not None else current_timestamp() job_ids = self.get_expired_job_ids(score) if job_ids: failed_job_registry = FailedJobRegistry(self.name, self.connection) with self.connection.pipeline() as pipeline: for job_id in job_ids: try: job = self.job_class.fetch(job_id, connection=self.connection) job.set_status(JobStatus.FAILED) job.save(pipeline=pipeline, include_meta=False) job.cleanup(ttl=-1, pipeline=pipeline) failed_job_registry.add(job, job.failure_ttl) except NoSuchJobError: pass pipeline.zremrangebyscore(self.key, 0, score) pipeline.execute() return job_ids class FinishedJobRegistry(BaseRegistry): """ Registry of jobs that have been completed. Jobs are added to this registry after they have successfully completed for monitoring purposes. """ key_template = 'rq:finished:{0}' def cleanup(self, timestamp=None): """Remove expired jobs from registry. 
Removes jobs with an expiry time earlier than timestamp, specified as seconds since the Unix epoch. timestamp defaults to call time if unspecified. """ score = timestamp if timestamp is not None else current_timestamp() self.connection.zremrangebyscore(self.key, 0, score) class FailedJobRegistry(BaseRegistry): """ Registry of containing failed jobs. """ key_template = 'rq:failed:{0}' def cleanup(self, timestamp=None): """Remove expired jobs from registry. Removes jobs with an expiry time earlier than timestamp, specified as seconds since the Unix epoch. timestamp defaults to call time if unspecified. """ score = timestamp if timestamp is not None else current_timestamp() self.connection.zremrangebyscore(self.key, 0, score) def add(self, job, ttl=None, exc_string='', pipeline=None): """ Adds a job to a registry with expiry time of now + ttl. `ttl` defaults to DEFAULT_FAILURE_TTL if not specified. """ if ttl is None: ttl = DEFAULT_FAILURE_TTL score = ttl if ttl < 0 else current_timestamp() + ttl if pipeline: p = pipeline else: p = self.connection.pipeline() job.exc_info = exc_string job.save(pipeline=p, include_meta=False) job.cleanup(ttl=ttl, pipeline=p) p.zadd(self.key, {job.id: score}) if not pipeline: p.execute() def requeue(self, job_or_id): """Requeues the job with the given job ID.""" if isinstance(job_or_id, self.job_class): job = job_or_id else: job = self.job_class.fetch(job_or_id, connection=self.connection) result = self.connection.zrem(self.key, job.id) if not result: raise InvalidJobOperation queue = Queue(job.origin, connection=self.connection, job_class=self.job_class) return queue.enqueue_job(job) class DeferredJobRegistry(BaseRegistry): """ Registry of deferred jobs (waiting for another job to finish). """ key_template = 'rq:deferred:{0}' def cleanup(self): """This method is only here to prevent errors because this method is automatically called by `count()` and `get_job_ids()` methods implemented in BaseRegistry.""" pass class ScheduledJobRegistry(BaseRegistry): """ Registry of scheduled jobs. """ key_template = 'rq:scheduled:{0}' def __init__(self, *args, **kwargs): super(ScheduledJobRegistry, self).__init__(*args, **kwargs) # The underlying implementation of get_jobs_to_enqueue() is # the same as get_expired_job_ids, but get_expired_job_ids() doesn't # make sense in this context self.get_jobs_to_enqueue = self.get_expired_job_ids def schedule(self, job, scheduled_datetime, pipeline=None): """ Adds job to registry, scored by its execution time (in UTC). If datetime has no tzinfo, it will assume localtimezone. """ # If datetime has no timezone, assume server's local timezone # if we're on Python 3. 
If we're on Python 2.7, raise an # exception since Python < 3.2 has no builtin `timezone` class if not scheduled_datetime.tzinfo: try: from datetime import timezone except ImportError: raise ValueError('datetime object with no timezone') tz = timezone(timedelta(seconds=-time.timezone)) scheduled_datetime = scheduled_datetime.replace(tzinfo=tz) timestamp = calendar.timegm(scheduled_datetime.utctimetuple()) return self.connection.zadd(self.key, {job.id: timestamp}) def cleanup(self): """This method is only here to prevent errors because this method is automatically called by `count()` and `get_job_ids()` methods implemented in BaseRegistry.""" pass def remove_jobs(self, timestamp=None, pipeline=None): """Remove jobs whose timestamp is in the past from registry.""" connection = pipeline if pipeline is not None else self.connection score = timestamp if timestamp is not None else current_timestamp() return connection.zremrangebyscore(self.key, 0, score) def get_jobs_to_schedule(self, timestamp=None): """Remove jobs whose timestamp is in the past from registry.""" score = timestamp if timestamp is not None else current_timestamp() return [as_text(job_id) for job_id in self.connection.zrangebyscore(self.key, 0, score)] def get_scheduled_time(self, job_or_id): """Returns datetime (UTC) at which job is scheduled to be enqueued""" if isinstance(job_or_id, self.job_class): job_id = job_or_id.id else: job_id = job_or_id score = self.connection.zscore(self.key, job_id) if not score: raise NoSuchJobError return datetime.fromtimestamp(score, tz=utc) def clean_registries(queue): """Cleans StartedJobRegistry and FinishedJobRegistry of a queue.""" registry = FinishedJobRegistry(name=queue.name, connection=queue.connection, job_class=queue.job_class) registry.cleanup() registry = StartedJobRegistry(name=queue.name, connection=queue.connection, job_class=queue.job_class) registry.cleanup() registry = FailedJobRegistry(name=queue.name, connection=queue.connection, job_class=queue.job_class) registry.cleanup() rq-1.2.2/rq/local.py0000644000076500000240000003112613566232513014661 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- # flake8: noqa """ werkzeug.local ~~~~~~~~~~~~~~ This module implements context-local objects. :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ # Since each thread has its own greenlet we can just use those as identifiers # for the context. If greenlets are not available we fall back to the # current thread ident. try: from greenlet import getcurrent as get_ident except ImportError: # noqa try: from thread import get_ident # noqa except ImportError: # noqa try: from _thread import get_ident # noqa except ImportError: # noqa from dummy_thread import get_ident # noqa def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`StackLocal` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. 
versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return LocalProxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it returns a proxy that resolves to the topmost item on the stack. .. versionadded:: 0.6.1 """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): # noqa object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return LocalProxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None def __len__(self): stack = getattr(self._local, 'stack', None) if stack is None: return 0 return len(stack) class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Everytime the manager cleans up it, will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. .. versionchanged:: 0.6.1 Instead of a manager the :func:`release_local` function can be used as well. .. versionchanged:: 0.7 `ident_func` was added. 
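A quick sketch of the context-local behaviour these classes provide: each thread (or greenlet) sees its own attribute storage, and release_local() drops the data for the current context only.

import threading
from rq.local import Local, release_local

loc = Local()
loc.value = 'main'

def in_thread():
    loc.value = 'worker'        # only visible inside this thread's context
    print(loc.value)            # -> 'worker'

t = threading.Thread(target=in_thread)
t.start()
t.join()

print(loc.value)                # -> 'main' (untouched by the other thread)
release_local(loc)
print(hasattr(loc, 'value'))    # -> False, this context's storage was released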
""" def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. .. versionchanged:: 0.7 You can pass a different ident function to the local manager that will then be propagated to all the locals passed to the constructor. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def __repr__(self): return '<%s storages: %d>' % ( self.__class__.__name__, len(self.locals) ) class LocalProxy(object): """Acts as a proxy for a werkzeug local. Forwards all operations to a proxied object. The only operations not supported for forwarding are right handed operands and any kind of assignment. Example usage:: from werkzeug.local import Local l = Local() # these are proxies request = l('request') user = l('user') from werkzeug.local import LocalStack _response_local = LocalStack() # this is a proxy response = _response_local() Whenever something is bound to l.user / l.request the proxy objects will forward all operations. If no object is bound a :exc:`RuntimeError` will be raised. To create proxies to :class:`Local` or :class:`LocalStack` objects, call the object as shown above. If you want to have a proxy to an object looked up by a function, you can (as of Werkzeug 0.6.1) pass a function to the :class:`LocalProxy` constructor:: session = LocalProxy(lambda: get_current_request().session) .. versionchanged:: 0.6.1 The class can be instanciated with a callable as well now. """ __slots__ = ('__local', '__dict__', '__name__') def __init__(self, local, name=None): object.__setattr__(self, '_LocalProxy__local', local) object.__setattr__(self, '__name__', name) def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. 
""" if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: return '<%s unbound>' % self.__class__.__name__ return repr(obj) def __nonzero__(self): try: return bool(self._get_current_object()) except RuntimeError: return False def __unicode__(self): try: return unicode(self._get_current_object()) except RuntimeError: return repr(self) def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) __str__ = lambda x: str(x._get_current_object()) __lt__ = lambda x, o: x._get_current_object() < o __le__ = lambda x, o: x._get_current_object() <= o __eq__ = lambda x, o: x._get_current_object() == o __ne__ = lambda x, o: x._get_current_object() != o __gt__ = lambda x, o: x._get_current_object() > o __ge__ = lambda x, o: x._get_current_object() >= o __cmp__ = lambda x, o: cmp(x._get_current_object(), o) __hash__ = lambda x: hash(x._get_current_object()) __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) __len__ = lambda x: len(x._get_current_object()) __getitem__ = lambda x, i: x._get_current_object()[i] __iter__ = lambda x: iter(x._get_current_object()) __contains__ = lambda x, i: i in x._get_current_object() __getslice__ = lambda x, i, j: x._get_current_object()[i:j] __add__ = lambda x, o: x._get_current_object() + o __sub__ = lambda x, o: x._get_current_object() - o __mul__ = lambda x, o: x._get_current_object() * o __floordiv__ = lambda x, o: x._get_current_object() // o __mod__ = lambda x, o: x._get_current_object() % o __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) __pow__ = lambda x, o: x._get_current_object() ** o __lshift__ = lambda x, o: x._get_current_object() << o __rshift__ = lambda x, o: x._get_current_object() >> o __and__ = lambda x, o: x._get_current_object() & o __xor__ = lambda x, o: x._get_current_object() ^ o __or__ = lambda x, o: x._get_current_object() | o __div__ = lambda x, o: x._get_current_object().__div__(o) __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) __neg__ = lambda x: -(x._get_current_object()) __pos__ = lambda x: +(x._get_current_object()) __abs__ = lambda x: abs(x._get_current_object()) __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) __long__ = lambda x: long(x._get_current_object()) __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) __enter__ = lambda x: 
x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) rq-1.2.2/rq/job.py0000644000076500000240000006372713615036160014350 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import inspect import warnings import zlib from functools import partial from uuid import uuid4 from rq.compat import as_text, decode_redis_hash, string_types, text_type from .connections import resolve_connection from .exceptions import InvalidJobDependency, NoSuchJobError, UnpickleError from .local import LocalStack from .utils import (enum, import_attribute, parse_timeout, str_to_date, utcformat, utcnow) try: import cPickle as pickle except ImportError: # noqa # pragma: no cover import pickle # Serialize pickle dumps using the highest pickle protocol (binary, default # uses ascii) dumps = partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL) loads = pickle.loads JobStatus = enum( 'JobStatus', QUEUED='queued', FINISHED='finished', FAILED='failed', STARTED='started', DEFERRED='deferred', SCHEDULED='scheduled', ) # Sentinel value to mark that some of our lazily evaluated properties have not # yet been evaluated. UNEVALUATED = object() def unpickle(pickled_string): """Unpickles a string, but raises a unified UnpickleError in case anything fails. This is a helper method to not have to deal with the fact that `loads()` potentially raises many types of exceptions (e.g. AttributeError, IndexError, TypeError, KeyError, etc.) """ try: obj = loads(pickled_string) except Exception as e: raise UnpickleError('Could not unpickle', pickled_string, e) return obj def cancel_job(job_id, connection=None): """Cancels the job with the given job ID, preventing execution. Discards any job info (i.e. it can't be requeued later). """ Job.fetch(job_id, connection=connection).cancel() def get_current_job(connection=None, job_class=None): """Returns the Job instance that is currently being executed. If this function is invoked from outside a job context, None is returned. """ if job_class: warnings.warn("job_class argument for get_current_job is deprecated.", DeprecationWarning) return _job_stack.top def requeue_job(job_id, connection): job = Job.fetch(job_id, connection=connection) return job.requeue() class Job(object): """A Job is just a convenient datastructure to pass around job (meta) data. """ redis_job_namespace_prefix = 'rq:job:' # Job construction @classmethod def create(cls, func, args=None, kwargs=None, connection=None, result_ttl=None, ttl=None, status=None, description=None, depends_on=None, timeout=None, id=None, origin=None, meta=None, failure_ttl=None): """Creates a new Job instance for the given function, arguments, and keyword arguments. 
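Most code builds jobs indirectly through Queue.enqueue(), which calls create() with the options shown below; constructing and persisting a job by hand looks roughly like this (the function path and option values are purely illustrative):

from redis import Redis
from rq.job import Job

redis = Redis()
job = Job.create('operator.add', args=(2, 3),
                 connection=redis,
                 result_ttl=500,      # keep the result for 500 seconds
                 timeout=30,          # hard execution limit, in seconds
                 description='add two numbers')
job.save()                            # writes the hash under rq:job:<id>
print(job.id)
print(job.get_call_string())          # -> operator.add(2, 3)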
""" if args is None: args = () if kwargs is None: kwargs = {} if not isinstance(args, (tuple, list)): raise TypeError('{0!r} is not a valid args list'.format(args)) if not isinstance(kwargs, dict): raise TypeError('{0!r} is not a valid kwargs dict'.format(kwargs)) job = cls(connection=connection) if id is not None: job.set_id(id) if origin is not None: job.origin = origin # Set the core job tuple properties job._instance = None if inspect.ismethod(func): job._instance = func.__self__ job._func_name = func.__name__ elif inspect.isfunction(func) or inspect.isbuiltin(func): job._func_name = '{0}.{1}'.format(func.__module__, func.__name__) elif isinstance(func, string_types): job._func_name = as_text(func) elif not inspect.isclass(func) and hasattr(func, '__call__'): # a callable class instance job._instance = func job._func_name = '__call__' else: raise TypeError('Expected a callable or a string, but got: {0}'.format(func)) job._args = args job._kwargs = kwargs # Extra meta data job.description = description or job.get_call_string() job.result_ttl = parse_timeout(result_ttl) job.failure_ttl = parse_timeout(failure_ttl) job.ttl = parse_timeout(ttl) job.timeout = parse_timeout(timeout) job._status = status job.meta = meta or {} # dependency could be job instance or id if depends_on is not None: job._dependency_ids = [depends_on.id if isinstance(depends_on, Job) else depends_on] return job def get_status(self, refresh=True): if refresh: self._status = as_text(self.connection.hget(self.key, 'status')) return self._status def set_status(self, status, pipeline=None): self._status = status connection = pipeline or self.connection connection.hset(self.key, 'status', self._status) @property def is_finished(self): return self.get_status() == JobStatus.FINISHED @property def is_queued(self): return self.get_status() == JobStatus.QUEUED @property def is_failed(self): return self.get_status() == JobStatus.FAILED @property def is_started(self): return self.get_status() == JobStatus.STARTED @property def is_deferred(self): return self.get_status() == JobStatus.DEFERRED @property def is_scheduled(self): return self.get_status() == JobStatus.SCHEDULED @property def _dependency_id(self): """Returns the first item in self._dependency_ids. Present preserve compatibility with third party packages.. """ if self._dependency_ids: return self._dependency_ids[0] @property def dependency(self): """Returns a job's dependency. To avoid repeated Redis fetches, we cache job.dependency as job._dependency. 
""" if not self._dependency_ids: return None if hasattr(self, '_dependency'): return self._dependency job = self.fetch(self._dependency_ids[0], connection=self.connection) self._dependency = job return job @property def dependent_ids(self): """Returns a list of ids of jobs whose execution depends on this job's successful execution.""" return list(map(as_text, self.connection.smembers(self.dependents_key))) @property def func(self): func_name = self.func_name if func_name is None: return None if self.instance: return getattr(self.instance, func_name) return import_attribute(self.func_name) def _unpickle_data(self): self._func_name, self._instance, self._args, self._kwargs = unpickle(self.data) @property def data(self): if self._data is UNEVALUATED: if self._func_name is UNEVALUATED: raise ValueError('Cannot build the job data') if self._instance is UNEVALUATED: self._instance = None if self._args is UNEVALUATED: self._args = () if self._kwargs is UNEVALUATED: self._kwargs = {} job_tuple = self._func_name, self._instance, self._args, self._kwargs self._data = dumps(job_tuple) return self._data @data.setter def data(self, value): self._data = value self._func_name = UNEVALUATED self._instance = UNEVALUATED self._args = UNEVALUATED self._kwargs = UNEVALUATED @property def func_name(self): if self._func_name is UNEVALUATED: self._unpickle_data() return self._func_name @func_name.setter def func_name(self, value): self._func_name = value self._data = UNEVALUATED @property def instance(self): if self._instance is UNEVALUATED: self._unpickle_data() return self._instance @instance.setter def instance(self, value): self._instance = value self._data = UNEVALUATED @property def args(self): if self._args is UNEVALUATED: self._unpickle_data() return self._args @args.setter def args(self, value): self._args = value self._data = UNEVALUATED @property def kwargs(self): if self._kwargs is UNEVALUATED: self._unpickle_data() return self._kwargs @kwargs.setter def kwargs(self, value): self._kwargs = value self._data = UNEVALUATED @classmethod def exists(cls, job_id, connection=None): """Returns whether a job hash exists for the given job ID.""" conn = resolve_connection(connection) return conn.exists(cls.key_for(job_id)) @classmethod def fetch(cls, id, connection=None): """Fetches a persisted job from its corresponding Redis key and instantiates it. """ job = cls(id, connection=connection) job.refresh() return job @classmethod def fetch_many(cls, job_ids, connection): """ Bulk version of Job.fetch For any job_ids which a job does not exist, the corresponding item in the returned list will be None. 
""" with connection.pipeline() as pipeline: for job_id in job_ids: pipeline.hgetall(cls.key_for(job_id)) results = pipeline.execute() jobs = [] for i, job_id in enumerate(job_ids): if results[i]: job = cls(job_id, connection=connection) job.restore(results[i]) jobs.append(job) else: jobs.append(None) return jobs def __init__(self, id=None, connection=None): self.connection = resolve_connection(connection) self._id = id self.created_at = utcnow() self._data = UNEVALUATED self._func_name = UNEVALUATED self._instance = UNEVALUATED self._args = UNEVALUATED self._kwargs = UNEVALUATED self.description = None self.origin = None self.enqueued_at = None self.started_at = None self.ended_at = None self._result = None self.exc_info = None self.timeout = None self.result_ttl = None self.failure_ttl = None self.ttl = None self._status = None self._dependency_ids = [] self.meta = {} def __repr__(self): # noqa # pragma: no cover return '{0}({1!r}, enqueued_at={2!r})'.format(self.__class__.__name__, self._id, self.enqueued_at) def __str__(self): return '<{0} {1}: {2}>'.format(self.__class__.__name__, self.id, self.description) # Job equality def __eq__(self, other): # noqa return isinstance(other, self.__class__) and self.id == other.id def __hash__(self): # pragma: no cover return hash(self.id) # Data access def get_id(self): # noqa """The job ID for this job instance. Generates an ID lazily the first time the ID is requested. """ if self._id is None: self._id = text_type(uuid4()) return self._id def set_id(self, value): """Sets a job ID for the given job.""" if not isinstance(value, string_types): raise TypeError('id must be a string, not {0}'.format(type(value))) self._id = value id = property(get_id, set_id) @classmethod def key_for(cls, job_id): """The Redis key that is used to store job hash under.""" return (cls.redis_job_namespace_prefix + job_id).encode('utf-8') @classmethod def dependents_key_for(cls, job_id): """The Redis key that is used to store job dependents hash under.""" return '{0}{1}:dependents'.format(cls.redis_job_namespace_prefix, job_id) @property def key(self): """The Redis key that is used to store job hash under.""" return self.key_for(self.id) @property def dependents_key(self): """The Redis key that is used to store job dependents hash under.""" return self.dependents_key_for(self.id) @property def dependencies_key(self): return '{0}:{1}:dependencies'.format(self.redis_job_namespace_prefix, self.id) def fetch_dependencies(self, watch=False, pipeline=None): """ Fetch all of a job's dependencies. If a pipeline is supplied, and watch is true, then set WATCH on all the keys of all dependencies. Returned jobs will use self's connection, not the pipeline supplied. """ connection = pipeline if pipeline is not None else self.connection if watch and self._dependency_ids: connection.watch(*self._dependency_ids) jobs = self.fetch_many(self._dependency_ids, connection=self.connection) for i, job in enumerate(jobs): if not job: raise NoSuchJobError('Dependency {0} does not exist'.format(self._dependency_ids[i])) return jobs @property def result(self): """Returns the return value of the job. Initially, right after enqueueing a job, the return value will be None. But when the job has been executed, and had a return value or exception, this will return that value or exception. Note that, when the job has no return value (i.e. returns None), the ReadOnlyJob object is useless, as the result won't be written back to Redis. 
Also note that you cannot draw the conclusion that a job has _not_ been executed when its return value is None, since return values written back to Redis will expire after a given amount of time (500 seconds by default). """ if self._result is None: rv = self.connection.hget(self.key, 'result') if rv is not None: # cache the result self._result = loads(rv) return self._result """Backwards-compatibility accessor property `return_value`.""" return_value = result def restore(self, raw_data): """Overwrite properties with the provided values stored in Redis""" obj = decode_redis_hash(raw_data) try: raw_data = obj['data'] except KeyError: raise NoSuchJobError('Unexpected job format: {0}'.format(obj)) try: self.data = zlib.decompress(raw_data) except zlib.error: # Fallback to uncompressed string self.data = raw_data self.created_at = str_to_date(obj.get('created_at')) self.origin = as_text(obj.get('origin')) self.description = as_text(obj.get('description')) self.enqueued_at = str_to_date(obj.get('enqueued_at')) self.started_at = str_to_date(obj.get('started_at')) self.ended_at = str_to_date(obj.get('ended_at')) result = obj.get('result') if result: try: self._result = unpickle(obj.get('result')) except UnpickleError: self._result = 'Unpickleable return value' self.timeout = parse_timeout(obj.get('timeout')) if obj.get('timeout') else None self.result_ttl = int(obj.get('result_ttl')) if obj.get('result_ttl') else None # noqa self.failure_ttl = int(obj.get('failure_ttl')) if obj.get('failure_ttl') else None # noqa self._status = as_text(obj.get('status')) if obj.get('status') else None dependency_id = obj.get('dependency_id', None) self._dependency_ids = [as_text(dependency_id)] if dependency_id else [] self.ttl = int(obj.get('ttl')) if obj.get('ttl') else None self.meta = unpickle(obj.get('meta')) if obj.get('meta') else {} raw_exc_info = obj.get('exc_info') if raw_exc_info: try: self.exc_info = as_text(zlib.decompress(raw_exc_info)) except zlib.error: # Fallback to uncompressed string self.exc_info = as_text(raw_exc_info) # Persistence def refresh(self): # noqa """Overwrite the current instance's properties with the values in the corresponding Redis key. Will raise a NoSuchJobError if no corresponding Redis key exists. """ data = self.connection.hgetall(self.key) if not data: raise NoSuchJobError('No such job: {0}'.format(self.key)) self.restore(data) def to_dict(self, include_meta=True): """ Returns a serialization of the current job instance You can exclude serializing the `meta` dictionary by setting `include_meta=False`. 
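A sketch of the meta round-trip that include_meta and save_meta() protect: save_meta() writes only the meta field, so worker-side updates don't clobber the rest of the job hash (the metadata key is an arbitrary example).

from redis import Redis
from rq import Queue
from rq.job import Job

redis = Redis()
queue = Queue('default', connection=redis)

job = queue.enqueue('os.getpid')
job.meta['requested_by'] = 'alice'    # arbitrary, user-defined metadata
job.save_meta()                       # persists only the 'meta' hash field

fresh = Job.fetch(job.id, connection=redis)
print(fresh.meta)                     # -> {'requested_by': 'alice'}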
""" obj = {} obj['created_at'] = utcformat(self.created_at or utcnow()) obj['data'] = zlib.compress(self.data) if self.origin is not None: obj['origin'] = self.origin if self.description is not None: obj['description'] = self.description if self.enqueued_at is not None: obj['enqueued_at'] = utcformat(self.enqueued_at) if self.started_at is not None: obj['started_at'] = utcformat(self.started_at) if self.ended_at is not None: obj['ended_at'] = utcformat(self.ended_at) if self._result is not None: try: obj['result'] = dumps(self._result) except: obj['result'] = 'Unpickleable return value' if self.exc_info is not None: obj['exc_info'] = zlib.compress(str(self.exc_info).encode('utf-8')) if self.timeout is not None: obj['timeout'] = self.timeout if self.result_ttl is not None: obj['result_ttl'] = self.result_ttl if self.failure_ttl is not None: obj['failure_ttl'] = self.failure_ttl if self._status is not None: obj['status'] = self._status if self._dependency_ids: obj['dependency_id'] = self._dependency_ids[0] if self.meta and include_meta: obj['meta'] = dumps(self.meta) if self.ttl: obj['ttl'] = self.ttl return obj def save(self, pipeline=None, include_meta=True): """ Dumps the current job instance to its corresponding Redis key. Exclude saving the `meta` dictionary by setting `include_meta=False`. This is useful to prevent clobbering user metadata without an expensive `refresh()` call first. Redis key persistence may be altered by `cleanup()` method. """ key = self.key connection = pipeline if pipeline is not None else self.connection connection.hmset(key, self.to_dict(include_meta=include_meta)) def save_meta(self): """Stores job meta from the job instance to the corresponding Redis key.""" meta = dumps(self.meta) self.connection.hset(self.key, 'meta', meta) def cancel(self, pipeline=None): """Cancels the given job, which will prevent the job from ever being ran (or inspected). This method merely exists as a high-level API call to cancel jobs without worrying about the internals required to implement job cancellation. """ from .queue import Queue pipeline = pipeline or self.connection.pipeline() if self.origin: q = Queue(name=self.origin, connection=self.connection) q.remove(self, pipeline=pipeline) pipeline.execute() def requeue(self): """Requeues job.""" self.failed_job_registry.requeue(self) def delete(self, pipeline=None, remove_from_queue=True, delete_dependents=False): """Cancels the job and deletes the job hash from Redis. 
Jobs depending on this job can optionally be deleted as well.""" if remove_from_queue: self.cancel(pipeline=pipeline) connection = pipeline if pipeline is not None else self.connection if self.is_finished: from .registry import FinishedJobRegistry registry = FinishedJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) registry.remove(self, pipeline=pipeline) elif self.is_deferred: from .registry import DeferredJobRegistry registry = DeferredJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) registry.remove(self, pipeline=pipeline) elif self.is_started: from .registry import StartedJobRegistry registry = StartedJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) registry.remove(self, pipeline=pipeline) elif self.is_scheduled: from .registry import ScheduledJobRegistry registry = ScheduledJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) registry.remove(self, pipeline=pipeline) elif self.is_failed: self.failed_job_registry.remove(self, pipeline=pipeline) if delete_dependents: self.delete_dependents(pipeline=pipeline) connection.delete(self.key, self.dependents_key, self.dependencies_key) def delete_dependents(self, pipeline=None): """Delete jobs depending on this job.""" connection = pipeline if pipeline is not None else self.connection for dependent_id in self.dependent_ids: try: job = Job.fetch(dependent_id, connection=self.connection) job.delete(pipeline=pipeline, remove_from_queue=False) except NoSuchJobError: # It could be that the dependent job was never saved to redis pass connection.delete(self.dependents_key) # Job execution def perform(self): # noqa """Invokes the job function with the job arguments.""" self.connection.persist(self.key) _job_stack.push(self) try: self._result = self._execute() finally: assert self is _job_stack.pop() return self._result def _execute(self): return self.func(*self.args, **self.kwargs) def get_ttl(self, default_ttl=None): """Returns ttl for a job that determines how long a job will be persisted. In the future, this method will also be responsible for determining ttl for repeated jobs. """ return default_ttl if self.ttl is None else self.ttl def get_result_ttl(self, default_ttl=None): """Returns ttl for a job that determines how long a jobs result will be persisted. In the future, this method will also be responsible for determining ttl for repeated jobs. """ return default_ttl if self.result_ttl is None else self.result_ttl # Representation def get_call_string(self): # noqa """Returns a string representation of the call, formatted as a regular Python function invocation statement. """ if self.func_name is None: return None arg_list = [as_text(repr(arg)) for arg in self.args] kwargs = ['{0}={1}'.format(k, as_text(repr(v))) for k, v in self.kwargs.items()] # Sort here because python 3.3 & 3.4 makes different call_string arg_list += sorted(kwargs) args = ', '.join(arg_list) return '{0}({1})'.format(self.func_name, args) def cleanup(self, ttl=None, pipeline=None, remove_from_queue=True): """Prepare job for eventual deletion (if needed). This method is usually called after successful execution. How long we persist the job and its result depends on the value of ttl: - If ttl is 0, cleanup the job immediately. - If it's a positive number, set the job to expire in X seconds. 
- If ttl is negative, don't set an expiry to it (persist forever) """ if ttl == 0: self.delete(pipeline=pipeline, remove_from_queue=remove_from_queue) elif not ttl: return elif ttl > 0: connection = pipeline if pipeline is not None else self.connection connection.expire(self.key, ttl) connection.expire(self.dependents_key, ttl) connection.expire(self.dependencies_key, ttl) @property def failed_job_registry(self): from .registry import FailedJobRegistry return FailedJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) def register_dependency(self, pipeline=None): """Jobs may have dependencies. Jobs are enqueued only if the job they depend on is successfully performed. We record this relation as a reverse dependency (a Redis set), with a key that looks something like: rq:job:job_id:dependents = {'job_id_1', 'job_id_2'} This method adds the job in its dependency's dependents set and adds the job to DeferredJobRegistry. """ from .registry import DeferredJobRegistry registry = DeferredJobRegistry(self.origin, connection=self.connection, job_class=self.__class__) registry.add(self, pipeline=pipeline) connection = pipeline if pipeline is not None else self.connection for dependency_id in self._dependency_ids: dependents_key = self.dependents_key_for(dependency_id) connection.sadd(dependents_key, self.id) connection.sadd(self.dependencies_key, dependency_id) _job_stack = LocalStack() rq-1.2.2/rq/connections.py0000644000076500000240000000400013566232513016100 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from contextlib import contextmanager from redis import Redis from .local import LocalStack, release_local class NoRedisConnectionException(Exception): pass @contextmanager def Connection(connection=None): # noqa if connection is None: connection = Redis() push_connection(connection) try: yield finally: popped = pop_connection() assert popped == connection, \ 'Unexpected Redis connection was popped off the stack. ' \ 'Check your Redis connection setup.' def push_connection(redis): """Pushes the given connection on the stack.""" _connection_stack.push(redis) def pop_connection(): """Pops the topmost connection from the stack.""" return _connection_stack.pop() def use_connection(redis=None): """Clears the stack and uses the given connection. Protects against mixed use of use_connection() and stacked connection contexts. """ assert len(_connection_stack) <= 1, \ 'You should not mix Connection contexts with use_connection()' release_local(_connection_stack) if redis is None: redis = Redis() push_connection(redis) def get_current_connection(): """Returns the current Redis connection (i.e. the topmost on the connection stack). """ return _connection_stack.top def resolve_connection(connection=None): """Convenience function to resolve the given or the current connection. Raises an exception if it cannot resolve a connection now. 
""" if connection is not None: return connection connection = get_current_connection() if connection is None: raise NoRedisConnectionException('Could not resolve a Redis connection') return connection _connection_stack = LocalStack() __all__ = ['Connection', 'get_current_connection', 'push_connection', 'pop_connection', 'use_connection'] rq-1.2.2/rq/__init__.py0000644000076500000240000000071613566232513015327 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- # flake8: noqa from __future__ import (absolute_import, division, print_function, unicode_literals) from .connections import (Connection, get_current_connection, pop_connection, push_connection, use_connection) from .job import cancel_job, get_current_job, requeue_job from .queue import Queue from .version import VERSION from .worker import SimpleWorker, Worker __version__ = VERSION rq-1.2.2/rq/suspension.py0000644000076500000240000000135613566232513015777 0ustar selwinstaff00000000000000WORKERS_SUSPENDED = 'rq:suspended' def is_suspended(connection, worker=None): with connection.pipeline() as pipeline: if worker is not None: worker.heartbeat(pipeline=pipeline) pipeline.exists(WORKERS_SUSPENDED) # pipeline returns a list of responses # https://github.com/andymccurdy/redis-py#pipelines return pipeline.execute()[-1] def suspend(connection, ttl=None): """ttl = time to live in seconds. Default is no expiration Note: If you pass in 0 it will invalidate right away """ connection.set(WORKERS_SUSPENDED, 1) if ttl is not None: connection.expire(WORKERS_SUSPENDED, ttl) def resume(connection): return connection.delete(WORKERS_SUSPENDED) rq-1.2.2/rq/timeouts.py0000644000076500000240000000460113566232513015436 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import signal class BaseTimeoutException(Exception): """Base exception for timeouts.""" pass class JobTimeoutException(BaseTimeoutException): """Raised when a job takes longer to complete than the allowed maximum timeout value. """ pass class HorseMonitorTimeoutException(BaseTimeoutException): """Raised when waiting for a horse exiting takes longer than the maximum timeout value. """ pass class BaseDeathPenalty(object): """Base class to setup job timeouts.""" def __init__(self, timeout, exception=JobTimeoutException, **kwargs): self._timeout = timeout self._exception = exception def __enter__(self): self.setup_death_penalty() def __exit__(self, type, value, traceback): # Always cancel immediately, since we're done try: self.cancel_death_penalty() except BaseTimeoutException: # Weird case: we're done with the with body, but now the alarm is # fired. We may safely ignore this situation and consider the # body done. pass # __exit__ may return True to supress further exception handling. We # don't want to suppress any exceptions here, since all errors should # just pass through, BaseTimeoutException being handled normally to the # invoking context. return False def setup_death_penalty(self): raise NotImplementedError() def cancel_death_penalty(self): raise NotImplementedError() class UnixSignalDeathPenalty(BaseDeathPenalty): def handle_death_penalty(self, signum, frame): raise self._exception('Task exceeded maximum timeout value ' '({0} seconds)'.format(self._timeout)) def setup_death_penalty(self): """Sets up an alarm signal and a signal handler that raises an exception after the timeout amount (expressed in seconds). 
""" signal.signal(signal.SIGALRM, self.handle_death_penalty) signal.alarm(self._timeout) def cancel_death_penalty(self): """Removes the death penalty alarm and puts back the system into default signal handling. """ signal.alarm(0) signal.signal(signal.SIGALRM, signal.SIG_DFL) rq-1.2.2/rq/logutils.py0000644000076500000240000000232313611564137015427 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import logging from rq.utils import ColorizingStreamHandler from rq.defaults import (DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT) def setup_loghandlers(level=None, date_format=DEFAULT_LOGGING_DATE_FORMAT, log_format=DEFAULT_LOGGING_FORMAT, name='rq.worker'): logger = logging.getLogger(name) if not _has_effective_handler(logger): formatter = logging.Formatter(fmt=log_format, datefmt=date_format) handler = ColorizingStreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) if level is not None: logger.setLevel(level) def _has_effective_handler(logger): """ Checks if a logger has a handler that will catch its messages in its logger hierarchy. :param `logging.Logger` logger: The logger to be checked. :return: True if a handler is found for the logger, False otherwise. :rtype: bool """ while True: if logger.handlers: return True if not logger.parent: return False logger = logger.parent rq-1.2.2/rq/cli/0000755000076500000240000000000013615036417013761 5ustar selwinstaff00000000000000rq-1.2.2/rq/cli/__init__.py0000644000076500000240000000032213566232513016067 0ustar selwinstaff00000000000000# flake8: noqa from .cli import main # TODO: the following imports can be removed when we drop the `rqinfo` and # `rqworkers` commands in favor of just shipping the `rq` command. from .cli import info, worker rq-1.2.2/rq/cli/cli.py0000755000076500000240000002653213604001573015106 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- """ RQ command line tool """ from __future__ import (absolute_import, division, print_function, unicode_literals) from functools import update_wrapper import os import sys import click from redis.exceptions import ConnectionError from rq import Connection, __version__ as version from rq.cli.helpers import (read_config_file, refresh, setup_loghandlers_from_args, show_both, show_queues, show_workers, CliConfig) from rq.contrib.legacy import cleanup_ghosts from rq.defaults import (DEFAULT_CONNECTION_CLASS, DEFAULT_JOB_CLASS, DEFAULT_QUEUE_CLASS, DEFAULT_WORKER_CLASS, DEFAULT_RESULT_TTL, DEFAULT_WORKER_TTL, DEFAULT_JOB_MONITORING_INTERVAL, DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT) from rq.exceptions import InvalidJobOperationError from rq.registry import FailedJobRegistry, clean_registries from rq.utils import import_attribute from rq.suspension import (suspend as connection_suspend, resume as connection_resume, is_suspended) from rq.worker_registration import clean_worker_registry # Disable the warning that Click displays (as of Click version 5.0) when users # use unicode_literals in Python 2. # See http://click.pocoo.org/dev/python3/#unicode-literals for more details. 
click.disable_unicode_literals_warning = True shared_options = [ click.option('--url', '-u', envvar='RQ_REDIS_URL', help='URL describing Redis connection details.'), click.option('--config', '-c', envvar='RQ_CONFIG', help='Module containing RQ settings.'), click.option('--worker-class', '-w', envvar='RQ_WORKER_CLASS', default=DEFAULT_WORKER_CLASS, help='RQ Worker class to use'), click.option('--job-class', '-j', envvar='RQ_JOB_CLASS', default=DEFAULT_JOB_CLASS, help='RQ Job class to use'), click.option('--queue-class', envvar='RQ_QUEUE_CLASS', default=DEFAULT_QUEUE_CLASS, help='RQ Queue class to use'), click.option('--connection-class', envvar='RQ_CONNECTION_CLASS', default=DEFAULT_CONNECTION_CLASS, help='Redis client class to use'), click.option('--path', '-P', default='.', help='Specify the import path.', multiple=True) ] def pass_cli_config(func): # add all the shared options to the command for option in shared_options: func = option(func) # pass the cli config object into the command def wrapper(*args, **kwargs): ctx = click.get_current_context() cli_config = CliConfig(**kwargs) return ctx.invoke(func, cli_config, *args[1:], **kwargs) return update_wrapper(wrapper, func) @click.group() @click.version_option(version) def main(): """RQ command line tool.""" pass @main.command() @click.option('--all', '-a', is_flag=True, help='Empty all queues') @click.argument('queues', nargs=-1) @pass_cli_config def empty(cli_config, all, queues, **options): """Empty given queues.""" if all: queues = cli_config.queue_class.all(connection=cli_config.connection, job_class=cli_config.job_class) else: queues = [cli_config.queue_class(queue, connection=cli_config.connection, job_class=cli_config.job_class) for queue in queues] if not queues: click.echo('Nothing to do') sys.exit(0) for queue in queues: num_jobs = queue.empty() click.echo('{0} jobs removed from {1} queue'.format(num_jobs, queue.name)) @main.command() @click.option('--all', '-a', is_flag=True, help='Requeue all failed jobs') @click.option('--queue', required=True, type=str) @click.argument('job_ids', nargs=-1) @pass_cli_config def requeue(cli_config, queue, all, job_class, job_ids, **options): """Requeue failed jobs.""" failed_job_registry = FailedJobRegistry(queue, connection=cli_config.connection) if all: job_ids = failed_job_registry.get_job_ids() if not job_ids: click.echo('Nothing to do') sys.exit(0) click.echo('Requeueing {0} jobs from failed queue'.format(len(job_ids))) fail_count = 0 with click.progressbar(job_ids) as job_ids: for job_id in job_ids: try: failed_job_registry.requeue(job_id) except InvalidJobOperationError: fail_count += 1 if fail_count > 0: click.secho('Unable to requeue {0} jobs from failed job registry'.format(fail_count), fg='red') @main.command() @click.option('--interval', '-i', type=float, help='Updates stats every N seconds (default: don\'t poll)') @click.option('--raw', '-r', is_flag=True, help='Print only the raw numbers, no bar charts') @click.option('--only-queues', '-Q', is_flag=True, help='Show only queue info') @click.option('--only-workers', '-W', is_flag=True, help='Show only worker info') @click.option('--by-queue', '-R', is_flag=True, help='Shows workers by queue') @click.argument('queues', nargs=-1) @pass_cli_config def info(cli_config, interval, raw, only_queues, only_workers, by_queue, queues, **options): """RQ command-line monitor.""" if only_queues: func = show_queues elif only_workers: func = show_workers else: func = show_both try: with Connection(cli_config.connection): if queues: qs = 
list(map(cli_config.queue_class, queues)) else: qs = cli_config.queue_class.all() for queue in qs: clean_registries(queue) clean_worker_registry(queue) refresh(interval, func, qs, raw, by_queue, cli_config.queue_class, cli_config.worker_class) except ConnectionError as e: click.echo(e) sys.exit(1) except KeyboardInterrupt: click.echo() sys.exit(0) @main.command() @click.option('--burst', '-b', is_flag=True, help='Run in burst mode (quit after all work is done)') @click.option('--logging_level', type=str, default="INFO", help='Set logging level') @click.option('--log-format', type=str, default=DEFAULT_LOGGING_FORMAT, help='Set the format of the logs') @click.option('--date-format', type=str, default=DEFAULT_LOGGING_DATE_FORMAT, help='Set the date format of the logs') @click.option('--name', '-n', help='Specify a different name') @click.option('--results-ttl', type=int, default=DEFAULT_RESULT_TTL, help='Default results timeout to be used') @click.option('--worker-ttl', type=int, default=DEFAULT_WORKER_TTL, help='Default worker timeout to be used') @click.option('--job-monitoring-interval', type=int, default=DEFAULT_JOB_MONITORING_INTERVAL, help='Default job monitoring interval to be used') @click.option('--disable-job-desc-logging', is_flag=True, help='Turn off description logging.') @click.option('--verbose', '-v', is_flag=True, help='Show more output') @click.option('--quiet', '-q', is_flag=True, help='Show less output') @click.option('--sentry-dsn', envvar='RQ_SENTRY_DSN', help='Report exceptions to this Sentry DSN') @click.option('--exception-handler', help='Exception handler(s) to use', multiple=True) @click.option('--pid', help='Write the process ID number to a file at the specified path') @click.option('--disable-default-exception-handler', '-d', is_flag=True, help='Disable RQ\'s default exception handler') @click.option('--max-jobs', type=int, default=None, help='Maximum number of jobs to execute') @click.option('--with-scheduler', '-s', is_flag=True, help='Run worker with scheduler') @click.argument('queues', nargs=-1) @pass_cli_config def worker(cli_config, burst, logging_level, name, results_ttl, worker_ttl, job_monitoring_interval, disable_job_desc_logging, verbose, quiet, sentry_dsn, exception_handler, pid, disable_default_exception_handler, max_jobs, with_scheduler, queues, log_format, date_format, **options): """Starts an RQ worker.""" settings = read_config_file(cli_config.config) if cli_config.config else {} # Worker specific default arguments queues = queues or settings.get('QUEUES', ['default']) sentry_dsn = sentry_dsn or settings.get('SENTRY_DSN') name = name or settings.get('NAME') if pid: with open(os.path.expanduser(pid), "w") as fp: fp.write(str(os.getpid())) setup_loghandlers_from_args(verbose, quiet, date_format, log_format) try: cleanup_ghosts(cli_config.connection) exception_handlers = [] for h in exception_handler: exception_handlers.append(import_attribute(h)) if is_suspended(cli_config.connection): click.secho('RQ is currently suspended, to resume job execution run "rq resume"', fg='red') sys.exit(1) queues = [cli_config.queue_class(queue, connection=cli_config.connection, job_class=cli_config.job_class) for queue in queues] worker = cli_config.worker_class( queues, name=name, connection=cli_config.connection, default_worker_ttl=worker_ttl, default_result_ttl=results_ttl, job_monitoring_interval=job_monitoring_interval, job_class=cli_config.job_class, queue_class=cli_config.queue_class, exception_handlers=exception_handlers or None, 
disable_default_exception_handler=disable_default_exception_handler, log_job_description=not disable_job_desc_logging ) # Should we configure Sentry? if sentry_dsn: from rq.contrib.sentry import register_sentry register_sentry(sentry_dsn) # if --verbose or --quiet, override --logging_level if verbose or quiet: logging_level = None worker.work(burst=burst, logging_level=logging_level, date_format=date_format, log_format=log_format, max_jobs=max_jobs, with_scheduler=with_scheduler) except ConnectionError as e: print(e) sys.exit(1) @main.command() @click.option('--duration', help='Seconds you want the workers to be suspended. Default is forever.', type=int) @pass_cli_config def suspend(cli_config, duration, **options): """Suspends all workers, to resume run `rq resume`""" if duration is not None and duration < 1: click.echo("Duration must be an integer greater than 1") sys.exit(1) connection_suspend(cli_config.connection, duration) if duration: msg = """Suspending workers for {0} seconds. No new jobs will be started during that time, but then will automatically resume""".format(duration) click.echo(msg) else: click.echo("Suspending workers. No new jobs will be started. But current jobs will be completed") @main.command() @pass_cli_config def resume(cli_config, **options): """Resumes processing of queues, that were suspended with `rq suspend`""" connection_resume(cli_config.connection) click.echo("Resuming workers.") rq-1.2.2/rq/cli/helpers.py0000644000076500000240000001745013566232513016004 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import sys import importlib import time from functools import partial import click import redis from redis import Redis from redis.sentinel import Sentinel from rq.defaults import (DEFAULT_CONNECTION_CLASS, DEFAULT_JOB_CLASS, DEFAULT_QUEUE_CLASS, DEFAULT_WORKER_CLASS) from rq.logutils import setup_loghandlers from rq.utils import import_attribute from rq.worker import WorkerStatus red = partial(click.style, fg='red') green = partial(click.style, fg='green') yellow = partial(click.style, fg='yellow') def read_config_file(module): """Reads all UPPERCASE variables defined in the given module file.""" settings = importlib.import_module(module) return dict([(k, v) for k, v in settings.__dict__.items() if k.upper() == k]) def get_redis_from_config(settings, connection_class=Redis): """Returns a StrictRedis instance from a dictionary of settings. To use redis sentinel, you must specify a dictionary in the configuration file. 
Example of a dictionary with keys without values: SENTINEL: {'INSTANCES':, 'SOCKET_TIMEOUT':, 'PASSWORD':,'DB':, 'MASTER_NAME':} """ if settings.get('REDIS_URL') is not None: return connection_class.from_url(settings['REDIS_URL']) elif settings.get('SENTINEL') is not None: instances = settings['SENTINEL'].get('INSTANCES', [('localhost', 26379)]) socket_timeout = settings['SENTINEL'].get('SOCKET_TIMEOUT', None) password = settings['SENTINEL'].get('PASSWORD', None) db = settings['SENTINEL'].get('DB', 0) master_name = settings['SENTINEL'].get('MASTER_NAME', 'mymaster') sn = Sentinel(instances, socket_timeout=socket_timeout, password=password, db=db) return sn.master_for(master_name) kwargs = { 'host': settings.get('REDIS_HOST', 'localhost'), 'port': settings.get('REDIS_PORT', 6379), 'db': settings.get('REDIS_DB', 0), 'password': settings.get('REDIS_PASSWORD', None), 'ssl': settings.get('REDIS_SSL', False), } return connection_class(**kwargs) def pad(s, pad_to_length): """Pads the given string to the given length.""" return ('%-' + '%ds' % pad_to_length) % (s,) def get_scale(x): """Finds the lowest scale where x <= scale.""" scales = [20, 50, 100, 200, 400, 600, 800, 1000] for scale in scales: if x <= scale: return scale return x def state_symbol(state): symbols = { WorkerStatus.BUSY: red('busy'), WorkerStatus.IDLE: green('idle'), WorkerStatus.SUSPENDED: yellow('suspended'), } try: return symbols[state] except KeyError: return state def show_queues(queues, raw, by_queue, queue_class, worker_class): num_jobs = 0 termwidth, _ = click.get_terminal_size() chartwidth = min(20, termwidth - 20) max_count = 0 counts = dict() for q in queues: count = q.count counts[q] = count max_count = max(max_count, count) scale = get_scale(max_count) ratio = chartwidth * 1.0 / scale for q in queues: count = counts[q] if not raw: chart = green('|' + '█' * int(ratio * count)) line = '%-12s %s %d' % (q.name, chart, count) else: line = 'queue %s %d' % (q.name, count) click.echo(line) num_jobs += count # print summary when not in raw mode if not raw: click.echo('%d queues, %d jobs total' % (len(queues), num_jobs)) def show_workers(queues, raw, by_queue, queue_class, worker_class): workers = set() if queues: for queue in queues: for worker in worker_class.all(queue=queue): workers.add(worker) else: for worker in worker_class.all(): workers.add(worker) if not by_queue: for worker in workers: queue_names = ', '.join(worker.queue_names()) name = '%s (%s %s)' % (worker.name, worker.hostname, worker.pid) if not raw: click.echo('%s: %s %s' % (name, state_symbol(worker.get_state()), queue_names)) else: click.echo('worker %s %s %s' % (name, worker.get_state(), queue_names)) else: # Display workers by queue queue_dict = {} for queue in queues: queue_dict[queue] = worker_class.all(queue=queue) if queue_dict: max_length = max([len(q.name) for q, in queue_dict.keys()]) else: max_length = 0 for queue in queue_dict: if queue_dict[queue]: queues_str = ", ".join( sorted( map(lambda w: '%s (%s)' % (w.name, state_symbol(w.get_state())), queue_dict[queue]) ) ) else: queues_str = '–' click.echo('%s %s' % (pad(queue.name + ':', max_length + 1), queues_str)) if not raw: click.echo('%d workers, %d queues' % (len(workers), len(queues))) def show_both(queues, raw, by_queue, queue_class, worker_class): show_queues(queues, raw, by_queue, queue_class, worker_class) if not raw: click.echo('') show_workers(queues, raw, by_queue, queue_class, worker_class) if not raw: click.echo('') import datetime click.echo('Updated: %s' % datetime.datetime.now()) 
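# --- Editor's illustrative sketch (not part of the upstream helpers.py) ---
# The get_redis_from_config() docstring above lists the SENTINEL keys only by
# name.  The hypothetical settings dict below shows one concrete shape; the
# hostnames, password and DB number are invented for illustration only.
# When a 'REDIS_URL' key is present it takes precedence over 'SENTINEL', and
# when neither is set, the REDIS_HOST / REDIS_PORT / REDIS_DB / REDIS_PASSWORD /
# REDIS_SSL keys (with their defaults) are used instead.
EXAMPLE_SENTINEL_SETTINGS = {  # name chosen for this sketch only
    'SENTINEL': {
        'INSTANCES': [('sentinel-1.example.com', 26379),
                      ('sentinel-2.example.com', 26379)],
        'MASTER_NAME': 'mymaster',
        'SOCKET_TIMEOUT': 0.5,
        'PASSWORD': 'hypothetical-secret',
        'DB': 0,
    },
}
# Usage (assuming a reachable Sentinel deployment):
#     conn = get_redis_from_config(EXAMPLE_SENTINEL_SETTINGS)
# which resolves the master connection via
# redis.sentinel.Sentinel(...).master_for('mymaster'), as the code above does.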
def refresh(interval, func, *args): while True: if interval: click.clear() func(*args) if interval: time.sleep(interval) else: break def setup_loghandlers_from_args(verbose, quiet, date_format, log_format): if verbose and quiet: raise RuntimeError("Flags --verbose and --quiet are mutually exclusive.") if verbose: level = 'DEBUG' elif quiet: level = 'WARNING' else: level = 'INFO' setup_loghandlers(level, date_format=date_format, log_format=log_format) class CliConfig(object): """A helper class to be used with click commands, to handle shared options""" def __init__(self, url=None, config=None, worker_class=DEFAULT_WORKER_CLASS, job_class=DEFAULT_JOB_CLASS, queue_class=DEFAULT_QUEUE_CLASS, connection_class=DEFAULT_CONNECTION_CLASS, path=None, *args, **kwargs): self._connection = None self.url = url self.config = config if path: for pth in path: sys.path.append(pth) try: self.worker_class = import_attribute(worker_class) except (ImportError, AttributeError) as exc: raise click.BadParameter(str(exc), param_hint='--worker-class') try: self.job_class = import_attribute(job_class) except (ImportError, AttributeError) as exc: raise click.BadParameter(str(exc), param_hint='--job-class') try: self.queue_class = import_attribute(queue_class) except (ImportError, AttributeError) as exc: raise click.BadParameter(str(exc), param_hint='--queue-class') try: self.connection_class = import_attribute(connection_class) except (ImportError, AttributeError) as exc: raise click.BadParameter(str(exc), param_hint='--connection-class') @property def connection(self): if self._connection is None: if self.url: self._connection = self.connection_class.from_url(self.url) else: settings = read_config_file(self.config) if self.config else {} self._connection = get_redis_from_config(settings, self.connection_class) return self._connection rq-1.2.2/rq/contrib/0000755000076500000240000000000013615036417014652 5ustar selwinstaff00000000000000rq-1.2.2/rq/contrib/legacy.py0000644000076500000240000000204713566232513016473 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) import logging from rq import get_current_connection from rq import Worker logger = logging.getLogger(__name__) def cleanup_ghosts(conn=None): """ RQ versions < 0.3.6 suffered from a race condition where workers, when abruptly terminated, did not have a chance to clean up their worker registration, leading to reports of ghosted workers in `rqinfo`. Since 0.3.6, new worker registrations automatically expire, and the worker will make sure to refresh the registrations as long as it's alive. This function will clean up any of such legacy ghosted workers. """ conn = conn if conn else get_current_connection() for worker in Worker.all(connection=conn): if conn.ttl(worker.key) == -1: ttl = worker.default_worker_ttl conn.expire(worker.key, ttl) logger.info('Marked ghosted worker {0} to expire in {1} seconds.'.format(worker.name, ttl)) rq-1.2.2/rq/contrib/__init__.py0000644000076500000240000000000013452015224016741 0ustar selwinstaff00000000000000rq-1.2.2/rq/contrib/sentry.py0000644000076500000240000000070213566232513016547 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) def register_sentry(sentry_dsn): """Given a Raven client and an RQ worker, registers exception handlers with the worker so exceptions are logged to Sentry. 
""" import sentry_sdk from sentry_sdk.integrations.rq import RqIntegration sentry_sdk.init(sentry_dsn, integrations=[RqIntegration()]) rq-1.2.2/rq/utils.py0000644000076500000240000001751213604001573014723 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- """ Miscellaneous helper functions. The formatter for ANSI colored console output is heavily based on Pygments terminal colorizing code, originally by Georg Brandl. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import calendar import datetime import importlib import logging import numbers import sys try: from collections.abc import Iterable except ImportError: from collections import Iterable from .compat import as_text, is_python_version, string_types from .exceptions import TimeoutFormatError class _Colorizer(object): def __init__(self): esc = "\x1b[" self.codes = {} self.codes[""] = "" self.codes["reset"] = esc + "39;49;00m" self.codes["bold"] = esc + "01m" self.codes["faint"] = esc + "02m" self.codes["standout"] = esc + "03m" self.codes["underline"] = esc + "04m" self.codes["blink"] = esc + "05m" self.codes["overline"] = esc + "06m" dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue", "purple", "teal", "lightgray"] light_colors = ["darkgray", "red", "green", "yellow", "blue", "fuchsia", "turquoise", "white"] x = 30 for d, l in zip(dark_colors, light_colors): self.codes[d] = esc + "%im" % x self.codes[l] = esc + "%i;01m" % x x += 1 del d, l, x self.codes["darkteal"] = self.codes["turquoise"] self.codes["darkyellow"] = self.codes["brown"] self.codes["fuscia"] = self.codes["fuchsia"] self.codes["white"] = self.codes["bold"] if hasattr(sys.stdout, "isatty"): self.notty = not sys.stdout.isatty() else: self.notty = True def reset_color(self): return self.codes["reset"] def colorize(self, color_key, text): if self.notty: return text else: return self.codes[color_key] + text + self.codes["reset"] def ansiformat(self, attr, text): """ Format ``text`` with a color and/or some attributes:: color normal color *color* bold color _color_ underlined color +color+ blinking color """ result = [] if attr[:1] == attr[-1:] == '+': result.append(self.codes['blink']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '*': result.append(self.codes['bold']) attr = attr[1:-1] if attr[:1] == attr[-1:] == '_': result.append(self.codes['underline']) attr = attr[1:-1] result.append(self.codes[attr]) result.append(text) result.append(self.codes['reset']) return ''.join(result) colorizer = _Colorizer() def make_colorizer(color): """Creates a function that colorizes text with the given color. 
For example: green = make_colorizer('darkgreen') red = make_colorizer('red') Then, you can use: print "It's either " + green('OK') + ' or ' + red('Oops') """ def inner(text): return colorizer.colorize(color, text) return inner class ColorizingStreamHandler(logging.StreamHandler): levels = { logging.WARNING: make_colorizer('darkyellow'), logging.ERROR: make_colorizer('darkred'), logging.CRITICAL: make_colorizer('darkred'), } def __init__(self, exclude=None, *args, **kwargs): self.exclude = exclude super(ColorizingStreamHandler, self).__init__(*args, **kwargs) @property def is_tty(self): isatty = getattr(self.stream, 'isatty', None) return isatty and isatty() def format(self, record): message = logging.StreamHandler.format(self, record) if self.is_tty: colorize = self.levels.get(record.levelno, lambda x: x) # Don't colorize any traceback parts = message.split('\n', 1) parts[0] = " ".join([parts[0].split(" ", 1)[0], colorize(parts[0].split(" ", 1)[1])]) message = '\n'.join(parts) return message def import_attribute(name): """Return an attribute from a dotted path name (e.g. "path.to.func").""" module_name, attribute = name.rsplit('.', 1) module = importlib.import_module(module_name) return getattr(module, attribute) def utcnow(): return datetime.datetime.utcnow() _TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' def utcformat(dt): return dt.strftime(as_text(_TIMESTAMP_FORMAT)) def utcparse(string): try: return datetime.datetime.strptime(string, _TIMESTAMP_FORMAT) except ValueError: # This catches any jobs remain with old datetime format return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%SZ') def first(iterable, default=None, key=None): """ Return first element of `iterable` that evaluates true, else return None (or an optional default value). >>> first([0, False, None, [], (), 42]) 42 >>> first([0, False, None, [], ()]) is None True >>> first([0, False, None, [], ()], default='ohai') 'ohai' >>> import re >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)']) >>> m.group(1) 'bc' The optional `key` argument specifies a one-argument predicate function like that used for `filter()`. The `key` argument, if supplied, must be in keyword form. For example: >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0) 4 """ if key is None: for el in iterable: if el: return el else: for el in iterable: if key(el): return el return default def is_nonstring_iterable(obj): """Returns whether the obj is an iterable, but not a string""" return isinstance(obj, Iterable) and not isinstance(obj, string_types) def ensure_list(obj): """ When passed an iterable of objects, does nothing, otherwise, it returns a list with just that object in it. """ return obj if is_nonstring_iterable(obj) else [obj] def current_timestamp(): """Returns current UTC timestamp""" return calendar.timegm(datetime.datetime.utcnow().utctimetuple()) def enum(name, *sequential, **named): values = dict(zip(sequential, range(len(sequential))), **named) # NOTE: Yes, we *really* want to cast using str() here. # On Python 2 type() requires a byte string (which is str() on Python 2). # On Python 3 it does not matter, so we'll use str(), which acts as # a no-op. 
return type(str(name), (), values) def backend_class(holder, default_name, override=None): """Get a backend class using its default attribute name or an override""" if override is None: return getattr(holder, default_name) elif isinstance(override, string_types): return import_attribute(override) else: return override def str_to_date(date_str): if date_str is None: return else: return utcparse(as_text(date_str)) def parse_timeout(timeout): """Transfer all kinds of timeout format to an integer representing seconds""" if not isinstance(timeout, numbers.Integral) and timeout is not None: try: timeout = int(timeout) except ValueError: digit, unit = timeout[:-1], (timeout[-1:]).lower() unit_second = {'d': 86400, 'h': 3600, 'm': 60, 's': 1} try: timeout = int(digit) * unit_second[unit] except (ValueError, KeyError): raise TimeoutFormatError('Timeout must be an integer or a string representing an integer, or ' 'a string with format: digits + unit, unit can be "d", "h", "m", "s", ' 'such as "1h", "23m".') return timeout rq-1.2.2/rq/exceptions.py0000644000076500000240000000145113566232513015746 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) class NoSuchJobError(Exception): pass class InvalidJobDependency(Exception): pass class InvalidJobOperationError(Exception): pass class InvalidJobOperation(Exception): pass class UnpickleError(Exception): def __init__(self, message, raw_data, inner_exception=None): super(UnpickleError, self).__init__(message, inner_exception) self.raw_data = raw_data class DequeueTimeout(Exception): pass class ShutDownImminentException(Exception): def __init__(self, msg, extra_info): self.extra_info = extra_info super(ShutDownImminentException, self).__init__(msg) class TimeoutFormatError(Exception): pass rq-1.2.2/rq/defaults.py0000644000076500000240000000056513566232513015401 0ustar selwinstaff00000000000000DEFAULT_JOB_CLASS = 'rq.job.Job' DEFAULT_QUEUE_CLASS = 'rq.Queue' DEFAULT_WORKER_CLASS = 'rq.Worker' DEFAULT_CONNECTION_CLASS = 'redis.Redis' DEFAULT_WORKER_TTL = 420 DEFAULT_JOB_MONITORING_INTERVAL = 30 DEFAULT_RESULT_TTL = 500 DEFAULT_FAILURE_TTL = 31536000 # 1 year in seconds DEFAULT_LOGGING_DATE_FORMAT = '%H:%M:%S' DEFAULT_LOGGING_FORMAT = '%(asctime)s %(message)s' rq-1.2.2/rq/scheduler.py0000644000076500000240000001513013611564137015543 0ustar selwinstaff00000000000000import logging import os import signal import time import traceback from datetime import datetime from multiprocessing import Process from .job import Job from .queue import Queue from .registry import ScheduledJobRegistry from .utils import current_timestamp, enum from .logutils import setup_loghandlers SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s' SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s' setup_loghandlers( level=logging.INFO, name="rq.scheduler", log_format="%(asctime)s: %(message)s", date_format="%H:%M:%S" ) class RQScheduler(object): # STARTED: scheduler has been started but sleeping # WORKING: scheduler is in the midst of scheduling jobs # STOPPED: scheduler is in stopped condition Status = enum( 'SchedulerStatus', STARTED='started', WORKING='working', STOPPED='stopped' ) def __init__(self, queues, connection, interval=1): self._queue_names = set(parse_names(queues)) self._acquired_locks = set([]) self._scheduled_job_registries = [] self.lock_acquisition_time = None self.connection = connection self.interval = interval self._stop_requested = False self._status = self.Status.STOPPED self._process = 
None @property def acquired_locks(self): return self._acquired_locks @property def status(self): return self._status @property def should_reacquire_locks(self): """Returns True if lock_acquisition_time is longer than 15 minutes ago""" if self._queue_names == self.acquired_locks: return False if not self.lock_acquisition_time: return True return (datetime.now() - self.lock_acquisition_time).total_seconds() > 900 def acquire_locks(self, auto_start=False): """Returns names of queue it successfully acquires lock on""" successful_locks = set([]) pid = os.getpid() logging.info("Trying to acquire locks for %s", ", ".join(self._queue_names)) for name in self._queue_names: if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=5): successful_locks.add(name) self._acquired_locks = self._acquired_locks.union(successful_locks) if self._acquired_locks: self.prepare_registries(self._acquired_locks) self.lock_acquisition_time = datetime.now() # If auto_start is requested and scheduler is not started, # run self.start() if self._acquired_locks and auto_start: if not self._process: self.start() return successful_locks def prepare_registries(self, queue_names): """Prepare scheduled job registries for use""" self._scheduled_job_registries = [] for name in queue_names: self._scheduled_job_registries.append( ScheduledJobRegistry(name, connection=self.connection) ) @classmethod def get_locking_key(self, name): """Returns scheduler key for a given queue name""" return SCHEDULER_LOCKING_KEY_TEMPLATE % name def enqueue_scheduled_jobs(self): """Enqueue jobs whose timestamp is in the past""" self._status = self.Status.WORKING for registry in self._scheduled_job_registries: timestamp = current_timestamp() # TODO: try to use Lua script to make get_jobs_to_schedule() # and remove_jobs() atomic job_ids = registry.get_jobs_to_schedule(timestamp) if not job_ids: continue queue = Queue(registry.name, connection=self.connection) with self.connection.pipeline() as pipeline: # This should be done in bulk for job_id in job_ids: job = Job.fetch(job_id, connection=self.connection) queue.enqueue_job(job, pipeline=pipeline) registry.remove_jobs(timestamp) pipeline.execute() self._status = self.Status.STARTED def _install_signal_handlers(self): """Installs signal handlers for handling SIGINT and SIGTERM gracefully. 
""" signal.signal(signal.SIGINT, self.request_stop) signal.signal(signal.SIGTERM, self.request_stop) def request_stop(self, signum=None, frame=None): """Toggle self._stop_requested that's checked on every loop""" self._stop_requested = True def heartbeat(self): """Updates the TTL on scheduler keys and the locks""" logging.info("Scheduler sending heartbeat to %s", ", ".join(self.acquired_locks)) if len(self._queue_names) > 1: with self.connection.pipeline() as pipeline: for name in self._queue_names: key = self.get_locking_key(name) pipeline.expire(key, self.interval + 5) pipeline.execute() else: key = self.get_locking_key(next(iter(self._queue_names))) self.connection.expire(key, self.interval + 5) def stop(self): logging.info("Scheduler stopping, releasing locks for %s...", ','.join(self._queue_names)) keys = [self.get_locking_key(name) for name in self._queue_names] self.connection.delete(*keys) self._status = self.Status.STOPPED def start(self): self._status = self.Status.STARTED self._process = Process(target=run, args=(self,), name='Scheduler') self._process.start() return self._process def work(self): self._install_signal_handlers() while True: if self._stop_requested: self.stop() break if self.should_reacquire_locks: self.acquire_locks() self.enqueue_scheduled_jobs() self.heartbeat() time.sleep(self.interval) def run(scheduler): logging.info("Scheduler for %s started with PID %s", ','.join(scheduler._queue_names), os.getpid()) try: scheduler.work() except: # noqa logging.error( 'Scheduler [PID %s] raised an exception.\n%s', os.getpid(), traceback.format_exc() ) raise logging.info("Scheduler with PID %s has stopped", os.getpid()) def parse_names(queues_or_names): """Given a list of strings or queues, returns queue names""" names = [] for queue_or_name in queues_or_names: if isinstance(queue_or_name, Queue): names.append(queue_or_name.name) else: names.append(str(queue_or_name)) return names rq-1.2.2/rq/dummy.py0000644000076500000240000000123613566232513014721 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- """ Some dummy tasks that are well-suited for generating load for testing purposes. 
""" from __future__ import (absolute_import, division, print_function, unicode_literals) import random import time def do_nothing(): pass def sleep(secs): time.sleep(secs) def endless_loop(): while True: time.sleep(1) def div_by_zero(): 1 / 0 def fib(n): if n <= 1: return 1 else: return fib(n - 2) + fib(n - 1) def random_failure(): if random.choice([True, False]): class RandomError(Exception): pass raise RandomError('Ouch!') return 'OK' rq-1.2.2/rq/worker_registration.py0000644000076500000240000000371413566232513017674 0ustar selwinstaff00000000000000from .compat import as_text WORKERS_BY_QUEUE_KEY = 'rq:workers:%s' REDIS_WORKER_KEYS = 'rq:workers' def register(worker, pipeline=None): """Store worker key in Redis so we can easily discover active workers.""" connection = pipeline if pipeline is not None else worker.connection connection.sadd(worker.redis_workers_keys, worker.key) for name in worker.queue_names(): redis_key = WORKERS_BY_QUEUE_KEY % name connection.sadd(redis_key, worker.key) def unregister(worker, pipeline=None): """Remove worker key from Redis.""" if pipeline is None: connection = worker.connection.pipeline() else: connection = pipeline connection.srem(worker.redis_workers_keys, worker.key) for name in worker.queue_names(): redis_key = WORKERS_BY_QUEUE_KEY % name connection.srem(redis_key, worker.key) if pipeline is None: connection.execute() def get_keys(queue=None, connection=None): """Returnes a list of worker keys for a queue""" if queue is None and connection is None: raise ValueError('"queue" or "connection" argument is required') if queue: redis = queue.connection redis_key = WORKERS_BY_QUEUE_KEY % queue.name else: redis = connection redis_key = REDIS_WORKER_KEYS return {as_text(key) for key in redis.smembers(redis_key)} def clean_worker_registry(queue): """Delete invalid worker keys in registry""" keys = list(get_keys(queue)) with queue.connection.pipeline() as pipeline: for key in keys: pipeline.exists(key) results = pipeline.execute() invalid_keys = [] for i, key_exists in enumerate(results): if not key_exists: invalid_keys.append(keys[i]) if invalid_keys: pipeline.srem(WORKERS_BY_QUEUE_KEY % queue.name, *invalid_keys) pipeline.srem(REDIS_WORKER_KEYS, *invalid_keys) pipeline.execute() rq-1.2.2/rq/decorators.py0000644000076500000240000000462513566232513015740 0ustar selwinstaff00000000000000# -*- coding: utf-8 -*- from __future__ import (absolute_import, division, print_function, unicode_literals) from functools import wraps from rq.compat import string_types from .defaults import DEFAULT_RESULT_TTL from .queue import Queue from .utils import backend_class class job(object): # noqa queue_class = Queue def __init__(self, queue, connection=None, timeout=None, result_ttl=DEFAULT_RESULT_TTL, ttl=None, queue_class=None, depends_on=None, at_front=None, meta=None, description=None, failure_ttl=None): """A decorator that adds a ``delay`` method to the decorated function, which in turn creates a RQ job when called. Accepts a required ``queue`` argument that can be either a ``Queue`` instance or a string denoting the queue name. 
For example: @job(queue='default') def simple_add(x, y): return x + y simple_add.delay(1, 2) # Puts simple_add function into queue """ self.queue = queue self.queue_class = backend_class(self, 'queue_class', override=queue_class) self.connection = connection self.timeout = timeout self.result_ttl = result_ttl self.ttl = ttl self.meta = meta self.depends_on = depends_on self.at_front = at_front self.description = description self.failure_ttl = failure_ttl def __call__(self, f): @wraps(f) def delay(*args, **kwargs): if isinstance(self.queue, string_types): queue = self.queue_class(name=self.queue, connection=self.connection) else: queue = self.queue depends_on = kwargs.pop('depends_on', None) at_front = kwargs.pop('at_front', False) if not depends_on: depends_on = self.depends_on if not at_front: at_front = self.at_front return queue.enqueue_call(f, args=args, kwargs=kwargs, timeout=self.timeout, result_ttl=self.result_ttl, ttl=self.ttl, depends_on=depends_on, at_front=at_front, meta=self.meta, description=self.description, failure_ttl=self.failure_ttl) f.delay = delay return f
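# --- Editor's illustrative usage sketch (not part of the upstream decorators.py) ---
# What a user module wiring up the @job decorator defined above might look like.
# The queue name, Redis connection and task body are hypothetical; only the rq
# API calls (job(...), .delay(), the `rq worker` command) come from this package.
from redis import Redis
from rq.decorators import job

redis_conn = Redis()  # assumption: Redis reachable on localhost:6379


@job('emails', connection=redis_conn, timeout=180, result_ttl=3600)
def send_welcome_email(user_id):
    # Hypothetical task body; it runs inside the worker process, not the caller.
    return 'welcome mail queued for user %s' % user_id


# Calling .delay() does not run the function here; it enqueues the call on the
# 'emails' queue via Queue.enqueue_call() and returns the resulting Job.
enqueued_job = send_welcome_email.delay(42)

# A worker started with `rq worker emails` picks the job up, executes it, and
# keeps the return value for result_ttl (3600) seconds under the job's Redis key.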