apscheduler-3.6.3/.github/ISSUE_TEMPLATE.md

This issue tracker is **ONLY** for reporting bugs. Issues other than bug reports will be
summarily closed. Any support questions or feature requests should be directed to:

* [StackOverflow](http://stackoverflow.com/questions/tagged/apscheduler)
* [Gitter](https://gitter.im/apscheduler/Lobby)
* [Google groups](http://groups.google.com/group/apscheduler)

## Expected Behavior

## Current Behavior

## Steps to Reproduce

1.
2.
3.
4.

## Context (Environment)

## Detailed Description

apscheduler-3.6.3/.gitignore

.project
.pydevproject
.idea/
.coverage
.cache/
.pytest_cache/
.tox/
.eggs/
*.egg-info/
*.pyc
dist/
docs/_build/
build/
virtualenv/
example.sqlite

apscheduler-3.6.3/.mailmap

Alex Grönholm agronholm
Alex Grönholm demigod

apscheduler-3.6.3/.readthedocs.yml

type: sphinx
python:
  version: "3.5"
  pip_install: true
  extra_requirements:
    - gevent
    - mongodb
    - redis
    - rethinkdb
    - sqlalchemy
    - tornado
    - twisted
    - zookeeper
    - doc

apscheduler-3.6.3/.travis.yml

dist: xenial
language: python
cache: pip
python: "3.6"

stages:
  - name: static analysis
  - name: test
  - name: deploy to pypi
    if: type = push AND tag =~ ^v\d+\.\d+\.\d+

jobs:
  include:
    - stage: static analysis
      env: TOXENV=flake8
    - &test
      stage: test
      env: TOXENV=pypy
      python: pypy2.7-6.0
      before_install: docker-compose up -d
      after_success:
        - pip install coveralls
        - coveralls
    - <<: *test
      env: TOXENV=pypy3
      python: pypy3.5-6.0
    - <<: *test
      env: TOXENV=py27
      python: "2.7"
    - <<: *test
      env: TOXENV=py34
      python: "3.4"
    - <<: *test
      env: TOXENV=py35
      python: "3.5"
    - <<: *test
      env: TOXENV=py36
      python: "3.6"
    - <<: *test
      env: TOXENV=py37
      python: "3.7"
    - stage: deploy to pypi
      install: true
      script: skip
      deploy:
        provider: pypi
        user: agronholm
        password:
          secure: YXuMMv+Ukr49mhiMzy8+bXDilRo52y6YrEzxlLnEksLlq0BNjgRcAsiDzo46k6xuxGxo7XsWaGEP6TBPwTR6eVgmWKjEPrnqyQepRV9Ibrw7+V7DzJn4pBZWX9Pg2oe3WMlYhooIvy6tRDfSaB8cc06/k0ftSNbzAjzpeVk8k0VyLRsBRBqOsrnzvqlRNOUWi0dlgGqhZvGkDMpmi+WVv6rW3y/VaV+vRK88luKHXUy48eMEGPnL3xCgr727es5qmAP9fDKVGKpXvpGzZyRoeMor2HQwj5AFt9DH4YoBbzJRJESSgI4DugtgGPG7Uz1Xx+TxZY609UbR1kju00Vl1KsnGBsxvR0okmItcTzIgp1IRg8MVDGc+duD3oCjlFQEynJL/RExY/yTQfN5rJFtV6nrHrMVgaL2N2dEVFV8zxGvmrptKRq2ujuJ5t2ssMkKAwXRxg2wXQwCjrxUDRMWd7VfRgGadTTdM6TzoeMo8vlL2FA/RvcFVSnfItrSS4ip2FyswzevVyCQMdblLlnlwFG5YTKMAfljd7JzkyA02IND/nFXGcJ3BsP4ciOXoa49PvD+qbNwqL+6v8I5wCYfPlpqfpf9um2SLDYDE6Ka44p1MvnF3me3oaegt2SX92eDvkZN1TICdOxQuvTTGEx7pW8wJS3opBVaQ7Pes5PHO60=
        distributions: sdist bdist_wheel
        on:
          tags: true

install: pip install tox
script: tox

notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/f41346c0f7c22d4fe002

apscheduler-3.6.3/LICENSE.txt

This is the MIT license: http://www.opensource.org/licenses/mit-license.php

Copyright (c) Alex Grönholm

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

apscheduler-3.6.3/README.rst

.. image:: https://travis-ci.com/agronholm/apscheduler.svg?branch=master
   :target: https://travis-ci.com/agronholm/apscheduler
   :alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/apscheduler/badge.svg?branch=master
   :target: https://coveralls.io/github/agronholm/apscheduler?branch=master
   :alt: Code Coverage

Advanced Python Scheduler (APScheduler) is a Python library that lets you schedule your Python
code to be executed later, either just once or periodically. You can add new jobs or remove old
ones on the fly as you please. If you store your jobs in a database, they will also survive
scheduler restarts and maintain their state. When the scheduler is restarted, it will then run
all the jobs it should have run while it was offline [#f1]_.

Among other things, APScheduler can be used as a cross-platform, application-specific replacement
for platform-specific schedulers, such as the cron daemon or the Windows task scheduler. Please
note, however, that APScheduler is **not** a daemon or service itself, nor does it come with any
command line tools. It is primarily meant to be run inside existing applications. That said,
APScheduler does provide some building blocks for you to build a scheduler service or to run a
dedicated scheduler process.

APScheduler has three built-in scheduling systems you can use:

* Cron-style scheduling (with optional start/end times)
* Interval-based execution (runs jobs on even intervals, with optional start/end times)
* One-off delayed execution (runs jobs once, on a set date/time)

You can mix and match scheduling systems and the backends where the jobs are stored any way you
like. Supported backends for storing jobs include:

* Memory
* `SQLAlchemy <https://www.sqlalchemy.org/>`_ (any RDBMS supported by SQLAlchemy works)
* `MongoDB <https://www.mongodb.com/>`_
* `Redis <https://redis.io/>`_
* `RethinkDB <https://rethinkdb.com/>`_
* `ZooKeeper <https://zookeeper.apache.org/>`_

APScheduler also integrates with several common Python frameworks, like:

* `asyncio <https://docs.python.org/3/library/asyncio.html>`_ (:pep:`3156`)
* `gevent <http://www.gevent.org/>`_
* `Tornado <http://www.tornadoweb.org/>`_
* `Twisted <https://twistedmatrix.com/>`_
* `Qt <https://www.qt.io/>`_ (using either `PyQt <https://riverbankcomputing.com/software/pyqt>`_
  or `PySide <https://wiki.qt.io/PySide>`_)

.. [#f1] The cutoff period for this is also configurable.

Documentation
-------------

Documentation can be found `here <https://apscheduler.readthedocs.io/>`_.

Source
------

The source can be browsed at `Github <https://github.com/agronholm/apscheduler>`_.

Reporting bugs
--------------

A `bug tracker <https://github.com/agronholm/apscheduler/issues>`_ is provided by Github.
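Example
-------

A minimal sketch of typical use (illustrative only, using the default in-memory job store;
``BackgroundScheduler`` and ``add_job`` are part of the documented API):

.. code-block:: python

    from time import sleep

    from apscheduler.schedulers.background import BackgroundScheduler

    def tick():
        print('Tick!')

    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)  # run every 3 seconds
    scheduler.start()

    try:
        # Keep the main thread alive; the scheduler runs in a background thread.
        while True:
            sleep(1)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()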
Getting help ------------ If you have problems or other questions, you can either: * Ask in the `apscheduler `_ room on Gitter * Ask on the `APScheduler Google group `_, or * Ask on `StackOverflow `_ and tag your question with the ``apscheduler`` tag apscheduler-3.6.3/apscheduler/000077500000000000000000000000001356022153700163255ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/__init__.py000066400000000000000000000005741356022153700204440ustar00rootroot00000000000000from pkg_resources import get_distribution, DistributionNotFound try: release = get_distribution('APScheduler').version.split('-')[0] except DistributionNotFound: release = '3.5.0' version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.')) version = __version__ = '.'.join(str(x) for x in version_info[:3]) del get_distribution, DistributionNotFound apscheduler-3.6.3/apscheduler/events.py000066400000000000000000000070111356022153700202020ustar00rootroot00000000000000__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED', 'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED', 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED', 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED', 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES', 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent') EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0 EVENT_SCHEDULER_SHUTDOWN = 2 ** 1 EVENT_SCHEDULER_PAUSED = 2 ** 2 EVENT_SCHEDULER_RESUMED = 2 ** 3 EVENT_EXECUTOR_ADDED = 2 ** 4 EVENT_EXECUTOR_REMOVED = 2 ** 5 EVENT_JOBSTORE_ADDED = 2 ** 6 EVENT_JOBSTORE_REMOVED = 2 ** 7 EVENT_ALL_JOBS_REMOVED = 2 ** 8 EVENT_JOB_ADDED = 2 ** 9 EVENT_JOB_REMOVED = 2 ** 10 EVENT_JOB_MODIFIED = 2 ** 11 EVENT_JOB_EXECUTED = 2 ** 12 EVENT_JOB_ERROR = 2 ** 13 EVENT_JOB_MISSED = 2 ** 14 EVENT_JOB_SUBMITTED = 2 ** 15 EVENT_JOB_MAX_INSTANCES = 2 ** 16 EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED | EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED | EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES) class SchedulerEvent(object): """ An event that concerns the scheduler itself. :ivar code: the type code of this event :ivar alias: alias of the job store or executor that was added or removed (if applicable) """ def __init__(self, code, alias=None): super(SchedulerEvent, self).__init__() self.code = code self.alias = alias def __repr__(self): return '<%s (code=%d)>' % (self.__class__.__name__, self.code) class JobEvent(SchedulerEvent): """ An event that concerns a job. :ivar code: the type code of this event :ivar job_id: identifier of the job in question :ivar jobstore: alias of the job store containing the job in question """ def __init__(self, code, job_id, jobstore): super(JobEvent, self).__init__(code) self.code = code self.job_id = job_id self.jobstore = jobstore class JobSubmissionEvent(JobEvent): """ An event that concerns the submission of a job to its executor. 
:ivar scheduled_run_times: a list of datetimes when the job was intended to run """ def __init__(self, code, job_id, jobstore, scheduled_run_times): super(JobSubmissionEvent, self).__init__(code, job_id, jobstore) self.scheduled_run_times = scheduled_run_times class JobExecutionEvent(JobEvent): """ An event that concerns the running of a job within its executor. :ivar scheduled_run_time: the time when the job was scheduled to be run :ivar retval: the return value of the successfully executed job :ivar exception: the exception raised by the job :ivar traceback: a formatted traceback for the exception """ def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None, traceback=None): super(JobExecutionEvent, self).__init__(code, job_id, jobstore) self.scheduled_run_time = scheduled_run_time self.retval = retval self.exception = exception self.traceback = traceback apscheduler-3.6.3/apscheduler/executors/000077500000000000000000000000001356022153700203465ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/executors/__init__.py000066400000000000000000000000001356022153700224450ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/executors/asyncio.py000066400000000000000000000040471356022153700223720ustar00rootroot00000000000000from __future__ import absolute_import import sys from apscheduler.executors.base import BaseExecutor, run_job from apscheduler.util import iscoroutinefunction_partial try: from apscheduler.executors.base_py3 import run_coroutine_job except ImportError: run_coroutine_job = None class AsyncIOExecutor(BaseExecutor): """ Runs jobs in the default executor of the event loop. If the job function is a native coroutine function, it is scheduled to be run directly in the event loop as soon as possible. All other functions are run in the event loop's default executor which is usually a thread pool. 
Plugin alias: ``asyncio`` """ def start(self, scheduler, alias): super(AsyncIOExecutor, self).start(scheduler, alias) self._eventloop = scheduler._eventloop self._pending_futures = set() def shutdown(self, wait=True): # There is no way to honor wait=True without converting this method into a coroutine method for f in self._pending_futures: if not f.done(): f.cancel() self._pending_futures.clear() def _do_submit_job(self, job, run_times): def callback(f): self._pending_futures.discard(f) try: events = f.result() except BaseException: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) if iscoroutinefunction_partial(job.func): if run_coroutine_job is not None: coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) f = self._eventloop.create_task(coro) else: raise Exception('Executing coroutine based jobs is not supported with Trollius') else: f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times, self._logger.name) f.add_done_callback(callback) self._pending_futures.add(f) apscheduler-3.6.3/apscheduler/executors/base.py000066400000000000000000000123301356022153700216310ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from collections import defaultdict from datetime import datetime, timedelta from traceback import format_tb import logging import sys from pytz import utc import six from apscheduler.events import ( JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) class MaxInstancesReachedError(Exception): def __init__(self, job): super(MaxInstancesReachedError, self).__init__( 'Job "%s" has already reached its maximum number of instances (%d)' % (job.id, job.max_instances)) class BaseExecutor(six.with_metaclass(ABCMeta, object)): """Abstract base class that defines the interface that every executor must implement.""" _scheduler = None _lock = None _logger = logging.getLogger('apscheduler.executors') def __init__(self): super(BaseExecutor, self).__init__() self._instances = defaultdict(lambda: 0) def start(self, scheduler, alias): """ Called by the scheduler when the scheduler is being started or when the executor is being added to an already running scheduler. :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this executor :param str|unicode alias: alias of this executor as it was assigned to the scheduler """ self._scheduler = scheduler self._lock = scheduler._create_lock() self._logger = logging.getLogger('apscheduler.executors.%s' % alias) def shutdown(self, wait=True): """ Shuts down this executor. :param bool wait: ``True`` to wait until all submitted jobs have been executed """ def submit_job(self, job, run_times): """ Submits job for execution. :param Job job: job to execute :param list[datetime] run_times: list of datetimes specifying when the job should have been run :raises MaxInstancesReachedError: if the maximum number of allowed instances for this job has been reached """ assert self._lock is not None, 'This executor has not been started yet' with self._lock: if self._instances[job.id] >= job.max_instances: raise MaxInstancesReachedError(job) self._do_submit_job(job, run_times) self._instances[job.id] += 1 @abstractmethod def _do_submit_job(self, job, run_times): """Performs the actual task of scheduling `run_job` to be called.""" def _run_job_success(self, job_id, events): """ Called by the executor with the list of generated events when :func:`run_job` has been successfully called. 
""" with self._lock: self._instances[job_id] -= 1 if self._instances[job_id] == 0: del self._instances[job_id] for event in events: self._scheduler._dispatch_event(event) def _run_job_error(self, job_id, exc, traceback=None): """Called by the executor with the exception if there is an error calling `run_job`.""" with self._lock: self._instances[job_id] -= 1 if self._instances[job_id] == 0: del self._instances[job_id] exc_info = (exc.__class__, exc, traceback) self._logger.error('Error running job %s', job_id, exc_info=exc_info) def run_job(job, jobstore_alias, run_times, logger_name): """ Called by executors to run the job. Returns a list of scheduler events to be dispatched by the scheduler. """ events = [] logger = logging.getLogger(logger_name) for run_time in run_times: # See if the job missed its run time window, and handle # possible misfires accordingly if job.misfire_grace_time is not None: difference = datetime.now(utc) - run_time grace_time = timedelta(seconds=job.misfire_grace_time) if difference > grace_time: events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, run_time)) logger.warning('Run time of job "%s" was missed by %s', job, difference) continue logger.info('Running job "%s" (scheduled at %s)', job, run_time) try: retval = job.func(*job.args, **job.kwargs) except BaseException: exc, tb = sys.exc_info()[1:] formatted_tb = ''.join(format_tb(tb)) events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc, traceback=formatted_tb)) logger.exception('Job "%s" raised an exception', job) # This is to prevent cyclic references that would lead to memory leaks if six.PY2: sys.exc_clear() del tb else: import traceback traceback.clear_frames(tb) del tb else: events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval)) logger.info('Job "%s" executed successfully', job) return events apscheduler-3.6.3/apscheduler/executors/base_py3.py000066400000000000000000000033571356022153700224350ustar00rootroot00000000000000import logging import sys from datetime import datetime, timedelta from traceback import format_tb from pytz import utc from apscheduler.events import ( JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED) async def run_coroutine_job(job, jobstore_alias, run_times, logger_name): """Coroutine version of run_job().""" events = [] logger = logging.getLogger(logger_name) for run_time in run_times: # See if the job missed its run time window, and handle possible misfires accordingly if job.misfire_grace_time is not None: difference = datetime.now(utc) - run_time grace_time = timedelta(seconds=job.misfire_grace_time) if difference > grace_time: events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias, run_time)) logger.warning('Run time of job "%s" was missed by %s', job, difference) continue logger.info('Running job "%s" (scheduled at %s)', job, run_time) try: retval = await job.func(*job.args, **job.kwargs) except BaseException: exc, tb = sys.exc_info()[1:] formatted_tb = ''.join(format_tb(tb)) events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time, exception=exc, traceback=formatted_tb)) logger.exception('Job "%s" raised an exception', job) else: events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval)) logger.info('Job "%s" executed successfully', job) return events 
apscheduler-3.6.3/apscheduler/executors/debug.py000066400000000000000000000010751356022153700220110ustar00rootroot00000000000000import sys from apscheduler.executors.base import BaseExecutor, run_job class DebugExecutor(BaseExecutor): """ A special executor that executes the target callable directly instead of deferring it to a thread or process. Plugin alias: ``debug`` """ def _do_submit_job(self, job, run_times): try: events = run_job(job, job._jobstore_alias, run_times, self._logger.name) except BaseException: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) apscheduler-3.6.3/apscheduler/executors/gevent.py000066400000000000000000000014111356022153700222050ustar00rootroot00000000000000from __future__ import absolute_import import sys from apscheduler.executors.base import BaseExecutor, run_job try: import gevent except ImportError: # pragma: nocover raise ImportError('GeventExecutor requires gevent installed') class GeventExecutor(BaseExecutor): """ Runs jobs as greenlets. Plugin alias: ``gevent`` """ def _do_submit_job(self, job, run_times): def callback(greenlet): try: events = greenlet.get() except BaseException: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\ link(callback) apscheduler-3.6.3/apscheduler/executors/pool.py000066400000000000000000000032101356022153700216650ustar00rootroot00000000000000from abc import abstractmethod import concurrent.futures from apscheduler.executors.base import BaseExecutor, run_job class BasePoolExecutor(BaseExecutor): @abstractmethod def __init__(self, pool): super(BasePoolExecutor, self).__init__() self._pool = pool def _do_submit_job(self, job, run_times): def callback(f): exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else (f.exception(), getattr(f.exception(), '__traceback__', None))) if exc: self._run_job_error(job.id, exc, tb) else: self._run_job_success(job.id, f.result()) f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) f.add_done_callback(callback) def shutdown(self, wait=True): self._pool.shutdown(wait) class ThreadPoolExecutor(BasePoolExecutor): """ An executor that runs jobs in a concurrent.futures thread pool. Plugin alias: ``threadpool`` :param max_workers: the maximum number of spawned threads. """ def __init__(self, max_workers=10): pool = concurrent.futures.ThreadPoolExecutor(int(max_workers)) super(ThreadPoolExecutor, self).__init__(pool) class ProcessPoolExecutor(BasePoolExecutor): """ An executor that runs jobs in a concurrent.futures process pool. Plugin alias: ``processpool`` :param max_workers: the maximum number of spawned processes. """ def __init__(self, max_workers=10): pool = concurrent.futures.ProcessPoolExecutor(int(max_workers)) super(ProcessPoolExecutor, self).__init__(pool) apscheduler-3.6.3/apscheduler/executors/tornado.py000066400000000000000000000033641356022153700223740ustar00rootroot00000000000000from __future__ import absolute_import import sys from concurrent.futures import ThreadPoolExecutor from tornado.gen import convert_yielded from apscheduler.executors.base import BaseExecutor, run_job try: from apscheduler.executors.base_py3 import run_coroutine_job from apscheduler.util import iscoroutinefunction_partial except ImportError: def iscoroutinefunction_partial(func): return False class TornadoExecutor(BaseExecutor): """ Runs jobs either in a thread pool or directly on the I/O loop. 
If the job function is a native coroutine function, it is scheduled to be run directly in the I/O loop as soon as possible. All other functions are run in a thread pool. Plugin alias: ``tornado`` :param int max_workers: maximum number of worker threads in the thread pool """ def __init__(self, max_workers=10): super(TornadoExecutor, self).__init__() self.executor = ThreadPoolExecutor(max_workers) def start(self, scheduler, alias): super(TornadoExecutor, self).start(scheduler, alias) self._ioloop = scheduler._ioloop def _do_submit_job(self, job, run_times): def callback(f): try: events = f.result() except BaseException: self._run_job_error(job.id, *sys.exc_info()[1:]) else: self._run_job_success(job.id, events) if iscoroutinefunction_partial(job.func): f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name) else: f = self.executor.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name) f = convert_yielded(f) f.add_done_callback(callback) apscheduler-3.6.3/apscheduler/executors/twisted.py000066400000000000000000000014121356022153700224010ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.executors.base import BaseExecutor, run_job class TwistedExecutor(BaseExecutor): """ Runs jobs in the reactor's thread pool. Plugin alias: ``twisted`` """ def start(self, scheduler, alias): super(TwistedExecutor, self).start(scheduler, alias) self._reactor = scheduler._reactor def _do_submit_job(self, job, run_times): def callback(success, result): if success: self._run_job_success(job.id, result) else: self._run_job_error(job.id, result.value, result.tb) self._reactor.getThreadPool().callInThreadWithCallback( callback, run_job, job, job._jobstore_alias, run_times, self._logger.name) apscheduler-3.6.3/apscheduler/job.py000066400000000000000000000255201356022153700174550ustar00rootroot00000000000000from inspect import ismethod, isclass from uuid import uuid4 import six from apscheduler.triggers.base import BaseTrigger from apscheduler.util import ( ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args, convert_to_datetime) try: from collections.abc import Iterable, Mapping except ImportError: from collections import Iterable, Mapping class Job(object): """ Contains the options given when scheduling callables and its current schedule and other state. This class should never be instantiated by the user. :var str id: the unique identifier of this job :var str name: the description of this job :var func: the callable to execute :var tuple|list args: positional arguments to the callable :var dict kwargs: keyword arguments to the callable :var bool coalesce: whether to only run the job once when several run times are due :var trigger: the trigger object that controls the schedule of this job :var str executor: the name of the executor that will run this job :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to be late :var int max_instances: the maximum number of concurrently executing instances allowed for this job :var datetime.datetime next_run_time: the next scheduled run time of this job .. note:: The ``misfire_grace_time`` has some non-obvious effects on job execution. See the :ref:`missed-job-executions` section in the documentation for an in-depth explanation. 
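    A usage sketch (``tick`` here is a placeholder callable; jobs are normally obtained from a
    scheduler's ``add_job()`` rather than instantiated directly)::

        job = scheduler.add_job(tick, 'interval', minutes=10, id='tick_job')
        job.pause()   # temporarily suspend the schedule
        job.resume()  # resume it
        job.remove()  # unschedule and delete the job
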
""" __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref', 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances', 'next_run_time') def __init__(self, scheduler, id=None, **kwargs): super(Job, self).__init__() self._scheduler = scheduler self._jobstore_alias = None self._modify(id=id or uuid4().hex, **kwargs) def modify(self, **changes): """ Makes the given changes to this job and saves it in the associated job store. Accepted keyword arguments are the same as the variables on this class. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job` :return Job: this job instance """ self._scheduler.modify_job(self.id, self._jobstore_alias, **changes) return self def reschedule(self, trigger, **trigger_args): """ Shortcut for switching the trigger on this job. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job` :return Job: this job instance """ self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args) return self def pause(self): """ Temporarily suspend the execution of this job. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job` :return Job: this job instance """ self._scheduler.pause_job(self.id, self._jobstore_alias) return self def resume(self): """ Resume the schedule of this job if previously paused. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job` :return Job: this job instance """ self._scheduler.resume_job(self.id, self._jobstore_alias) return self def remove(self): """ Unschedules this job and removes it from its associated job store. .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job` """ self._scheduler.remove_job(self.id, self._jobstore_alias) @property def pending(self): """ Returns ``True`` if the referenced job is still waiting to be added to its designated job store. """ return self._jobstore_alias is None # # Private API # def _get_run_times(self, now): """ Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive). :type now: datetime.datetime :rtype: list[datetime.datetime] """ run_times = [] next_run_time = self.next_run_time while next_run_time and next_run_time <= now: run_times.append(next_run_time) next_run_time = self.trigger.get_next_fire_time(next_run_time, now) return run_times def _modify(self, **changes): """ Validates the changes to the Job and makes the modifications if and only if all of them validate. 
""" approved = {} if 'id' in changes: value = changes.pop('id') if not isinstance(value, six.string_types): raise TypeError("id must be a nonempty string") if hasattr(self, 'id'): raise ValueError('The job ID may not be changed') approved['id'] = value if 'func' in changes or 'args' in changes or 'kwargs' in changes: func = changes.pop('func') if 'func' in changes else self.func args = changes.pop('args') if 'args' in changes else self.args kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs if isinstance(func, six.string_types): func_ref = func func = ref_to_obj(func) elif callable(func): try: func_ref = obj_to_ref(func) except ValueError: # If this happens, this Job won't be serializable func_ref = None else: raise TypeError('func must be a callable or a textual reference to one') if not hasattr(self, 'name') and changes.get('name', None) is None: changes['name'] = get_callable_name(func) if isinstance(args, six.string_types) or not isinstance(args, Iterable): raise TypeError('args must be a non-string iterable') if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping): raise TypeError('kwargs must be a dict-like object') check_callable_args(func, args, kwargs) approved['func'] = func approved['func_ref'] = func_ref approved['args'] = args approved['kwargs'] = kwargs if 'name' in changes: value = changes.pop('name') if not value or not isinstance(value, six.string_types): raise TypeError("name must be a nonempty string") approved['name'] = value if 'misfire_grace_time' in changes: value = changes.pop('misfire_grace_time') if value is not None and (not isinstance(value, six.integer_types) or value <= 0): raise TypeError('misfire_grace_time must be either None or a positive integer') approved['misfire_grace_time'] = value if 'coalesce' in changes: value = bool(changes.pop('coalesce')) approved['coalesce'] = value if 'max_instances' in changes: value = changes.pop('max_instances') if not isinstance(value, six.integer_types) or value <= 0: raise TypeError('max_instances must be a positive integer') approved['max_instances'] = value if 'trigger' in changes: trigger = changes.pop('trigger') if not isinstance(trigger, BaseTrigger): raise TypeError('Expected a trigger instance, got %s instead' % trigger.__class__.__name__) approved['trigger'] = trigger if 'executor' in changes: value = changes.pop('executor') if not isinstance(value, six.string_types): raise TypeError('executor must be a string') approved['executor'] = value if 'next_run_time' in changes: value = changes.pop('next_run_time') approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone, 'next_run_time') if changes: raise AttributeError('The following are not modifiable attributes of Job: %s' % ', '.join(changes)) for key, value in six.iteritems(approved): setattr(self, key, value) def __getstate__(self): # Don't allow this Job to be serialized if the function reference could not be determined if not self.func_ref: raise ValueError( 'This Job cannot be serialized since the reference to its callable (%r) could not ' 'be determined. Consider giving a textual reference (module:function name) ' 'instead.' 
% (self.func,)) # Instance methods cannot survive serialization as-is, so store the "self" argument # explicitly if ismethod(self.func) and not isclass(self.func.__self__): args = (self.func.__self__,) + tuple(self.args) else: args = self.args return { 'version': 1, 'id': self.id, 'func': self.func_ref, 'trigger': self.trigger, 'executor': self.executor, 'args': args, 'kwargs': self.kwargs, 'name': self.name, 'misfire_grace_time': self.misfire_grace_time, 'coalesce': self.coalesce, 'max_instances': self.max_instances, 'next_run_time': self.next_run_time } def __setstate__(self, state): if state.get('version', 1) > 1: raise ValueError('Job has version %s, but only version 1 can be handled' % state['version']) self.id = state['id'] self.func_ref = state['func'] self.func = ref_to_obj(self.func_ref) self.trigger = state['trigger'] self.executor = state['executor'] self.args = state['args'] self.kwargs = state['kwargs'] self.name = state['name'] self.misfire_grace_time = state['misfire_grace_time'] self.coalesce = state['coalesce'] self.max_instances = state['max_instances'] self.next_run_time = state['next_run_time'] def __eq__(self, other): if isinstance(other, Job): return self.id == other.id return NotImplemented def __repr__(self): return '' % (repr_escape(self.id), repr_escape(self.name)) def __str__(self): return repr_escape(self.__unicode__()) def __unicode__(self): if hasattr(self, 'next_run_time'): status = ('next run at: ' + datetime_repr(self.next_run_time) if self.next_run_time else 'paused') else: status = 'pending' return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status) apscheduler-3.6.3/apscheduler/jobstores/000077500000000000000000000000001356022153700203375ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/jobstores/__init__.py000066400000000000000000000000001356022153700224360ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/jobstores/base.py000066400000000000000000000106531356022153700216300ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod import logging import six class JobLookupError(KeyError): """Raised when the job store cannot find a job for update or removal.""" def __init__(self, job_id): super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id) class ConflictingIdError(KeyError): """Raised when the uniqueness of job IDs is being violated.""" def __init__(self, job_id): super(ConflictingIdError, self).__init__( u'Job identifier (%s) conflicts with an existing job' % job_id) class TransientJobError(ValueError): """ Raised when an attempt to add transient (with no func_ref) job to a persistent job store is detected. """ def __init__(self, job_id): super(TransientJobError, self).__init__( u'Job (%s) cannot be added to this job store because a reference to the callable ' u'could not be determined.' % job_id) class BaseJobStore(six.with_metaclass(ABCMeta)): """Abstract base class that defines the interface that every job store must implement.""" _scheduler = None _alias = None _logger = logging.getLogger('apscheduler.jobstores') def start(self, scheduler, alias): """ Called by the scheduler when the scheduler is being started or when the job store is being added to an already running scheduler. 
:param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting this job store :param str|unicode alias: alias of this job store as it was assigned to the scheduler """ self._scheduler = scheduler self._alias = alias self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias) def shutdown(self): """Frees any resources still bound to this job store.""" def _fix_paused_jobs_sorting(self, jobs): for i, job in enumerate(jobs): if job.next_run_time is not None: if i > 0: paused_jobs = jobs[:i] del jobs[:i] jobs.extend(paused_jobs) break @abstractmethod def lookup_job(self, job_id): """ Returns a specific job, or ``None`` if it isn't found.. The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of the returned job to point to the scheduler and itself, respectively. :param str|unicode job_id: identifier of the job :rtype: Job """ @abstractmethod def get_due_jobs(self, now): """ Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``. The returned jobs must be sorted by next run time (ascending). :param datetime.datetime now: the current (timezone aware) datetime :rtype: list[Job] """ @abstractmethod def get_next_run_time(self): """ Returns the earliest run time of all the jobs stored in this job store, or ``None`` if there are no active jobs. :rtype: datetime.datetime """ @abstractmethod def get_all_jobs(self): """ Returns a list of all jobs in this job store. The returned jobs should be sorted by next run time (ascending). Paused jobs (next_run_time == None) should be sorted last. The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of the returned jobs to point to the scheduler and itself, respectively. :rtype: list[Job] """ @abstractmethod def add_job(self, job): """ Adds the given job to this store. :param Job job: the job to add :raises ConflictingIdError: if there is another job in this store with the same ID """ @abstractmethod def update_job(self, job): """ Replaces the job in the store with the given newer version. :param Job job: the job to update :raises JobLookupError: if the job does not exist """ @abstractmethod def remove_job(self, job_id): """ Removes the given job from this store. :param str|unicode job_id: identifier of the job :raises JobLookupError: if the job does not exist """ @abstractmethod def remove_all_jobs(self): """Removes all jobs from this store.""" def __repr__(self): return '<%s>' % self.__class__.__name__ apscheduler-3.6.3/apscheduler/jobstores/memory.py000066400000000000000000000071071356022153700222260ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import datetime_to_utc_timestamp class MemoryJobStore(BaseJobStore): """ Stores jobs in an array in RAM. Provides no persistence support. 
Plugin alias: ``memory`` """ def __init__(self): super(MemoryJobStore, self).__init__() # list of (job, timestamp), sorted by next_run_time and job id (ascending) self._jobs = [] self._jobs_index = {} # id -> (job, timestamp) lookup table def lookup_job(self, job_id): return self._jobs_index.get(job_id, (None, None))[0] def get_due_jobs(self, now): now_timestamp = datetime_to_utc_timestamp(now) pending = [] for job, timestamp in self._jobs: if timestamp is None or timestamp > now_timestamp: break pending.append(job) return pending def get_next_run_time(self): return self._jobs[0][0].next_run_time if self._jobs else None def get_all_jobs(self): return [j[0] for j in self._jobs] def add_job(self, job): if job.id in self._jobs_index: raise ConflictingIdError(job.id) timestamp = datetime_to_utc_timestamp(job.next_run_time) index = self._get_job_index(timestamp, job.id) self._jobs.insert(index, (job, timestamp)) self._jobs_index[job.id] = (job, timestamp) def update_job(self, job): old_job, old_timestamp = self._jobs_index.get(job.id, (None, None)) if old_job is None: raise JobLookupError(job.id) # If the next run time has not changed, simply replace the job in its present index. # Otherwise, reinsert the job to the list to preserve the ordering. old_index = self._get_job_index(old_timestamp, old_job.id) new_timestamp = datetime_to_utc_timestamp(job.next_run_time) if old_timestamp == new_timestamp: self._jobs[old_index] = (job, new_timestamp) else: del self._jobs[old_index] new_index = self._get_job_index(new_timestamp, job.id) self._jobs.insert(new_index, (job, new_timestamp)) self._jobs_index[old_job.id] = (job, new_timestamp) def remove_job(self, job_id): job, timestamp = self._jobs_index.get(job_id, (None, None)) if job is None: raise JobLookupError(job_id) index = self._get_job_index(timestamp, job_id) del self._jobs[index] del self._jobs_index[job.id] def remove_all_jobs(self): self._jobs = [] self._jobs_index = {} def shutdown(self): self.remove_all_jobs() def _get_job_index(self, timestamp, job_id): """ Returns the index of the given job, or if it's not found, the index where the job should be inserted based on the given timestamp. :type timestamp: int :type job_id: str """ lo, hi = 0, len(self._jobs) timestamp = float('inf') if timestamp is None else timestamp while lo < hi: mid = (lo + hi) // 2 mid_job, mid_timestamp = self._jobs[mid] mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp if mid_timestamp > timestamp: hi = mid elif mid_timestamp < timestamp: lo = mid + 1 elif mid_job.id > job_id: hi = mid elif mid_job.id < job_id: lo = mid + 1 else: return mid return lo apscheduler-3.6.3/apscheduler/jobstores/mongodb.py000066400000000000000000000122601356022153700223370ustar00rootroot00000000000000from __future__ import absolute_import import warnings from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from bson.binary import Binary from pymongo.errors import DuplicateKeyError from pymongo import MongoClient, ASCENDING except ImportError: # pragma: nocover raise ImportError('MongoDBJobStore requires PyMongo installed') class MongoDBJobStore(BaseJobStore): """ Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to pymongo's `MongoClient `_. 
Plugin alias: ``mongodb`` :param str database: database to store jobs in :param str collection: collection to store jobs in :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of providing connection arguments :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available """ def __init__(self, database='apscheduler', collection='jobs', client=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): super(MongoDBJobStore, self).__init__() self.pickle_protocol = pickle_protocol if not database: raise ValueError('The "database" parameter must not be empty') if not collection: raise ValueError('The "collection" parameter must not be empty') if client: self.client = maybe_ref(client) else: connect_args.setdefault('w', 1) self.client = MongoClient(**connect_args) self.collection = self.client[database][collection] def start(self, scheduler, alias): super(MongoDBJobStore, self).start(scheduler, alias) self.collection.ensure_index('next_run_time', sparse=True) @property def connection(self): warnings.warn('The "connection" member is deprecated -- use "client" instead', DeprecationWarning) return self.client def lookup_job(self, job_id): document = self.collection.find_one(job_id, ['job_state']) return self._reconstitute_job(document['job_state']) if document else None def get_due_jobs(self, now): timestamp = datetime_to_utc_timestamp(now) return self._get_jobs({'next_run_time': {'$lte': timestamp}}) def get_next_run_time(self): document = self.collection.find_one({'next_run_time': {'$ne': None}}, projection=['next_run_time'], sort=[('next_run_time', ASCENDING)]) return utc_timestamp_to_datetime(document['next_run_time']) if document else None def get_all_jobs(self): jobs = self._get_jobs({}) self._fix_paused_jobs_sorting(jobs) return jobs def add_job(self, job): try: self.collection.insert({ '_id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) }) except DuplicateKeyError: raise ConflictingIdError(job.id) def update_job(self, job): changes = { 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } result = self.collection.update({'_id': job.id}, {'$set': changes}) if result and result['n'] == 0: raise JobLookupError(job.id) def remove_job(self, job_id): result = self.collection.remove(job_id) if result and result['n'] == 0: raise JobLookupError(job_id) def remove_all_jobs(self): self.collection.remove() def shutdown(self): self.client.close() def _reconstitute_job(self, job_state): job_state = pickle.loads(job_state) job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler job._jobstore_alias = self._alias return job def _get_jobs(self, conditions): jobs = [] failed_job_ids = [] for document in self.collection.find(conditions, ['_id', 'job_state'], sort=[('next_run_time', ASCENDING)]): try: jobs.append(self._reconstitute_job(document['job_state'])) except BaseException: self._logger.exception('Unable to restore job "%s" -- removing it', document['_id']) failed_job_ids.append(document['_id']) # Remove all the jobs we failed to restore if failed_job_ids: self.collection.remove({'_id': {'$in': failed_job_ids}}) return jobs def __repr__(self): return '<%s (client=%s)>' % (self.__class__.__name__, self.client) 
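A configuration sketch for this store (illustrative; the host and port are examples, and any
extra keyword arguments are forwarded to MongoClient as noted in the docstring above):

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.mongodb import MongoDBJobStore

jobstores = {'default': MongoDBJobStore(database='apscheduler', collection='jobs',
                                        host='localhost', port=27017)}
scheduler = BackgroundScheduler(jobstores=jobstores)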
apscheduler-3.6.3/apscheduler/jobstores/redis.py000066400000000000000000000125531356022153700220250ustar00rootroot00000000000000from __future__ import absolute_import from datetime import datetime from pytz import utc import six from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from redis import Redis except ImportError: # pragma: nocover raise ImportError('RedisJobStore requires redis installed') class RedisJobStore(BaseJobStore): """ Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's :class:`~redis.StrictRedis`. Plugin alias: ``redis`` :param int db: the database number to store jobs in :param str jobs_key: key to store jobs in :param str run_times_key: key to store the jobs' run times in :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available """ def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times', pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): super(RedisJobStore, self).__init__() if db is None: raise ValueError('The "db" parameter must not be empty') if not jobs_key: raise ValueError('The "jobs_key" parameter must not be empty') if not run_times_key: raise ValueError('The "run_times_key" parameter must not be empty') self.pickle_protocol = pickle_protocol self.jobs_key = jobs_key self.run_times_key = run_times_key self.redis = Redis(db=int(db), **connect_args) def lookup_job(self, job_id): job_state = self.redis.hget(self.jobs_key, job_id) return self._reconstitute_job(job_state) if job_state else None def get_due_jobs(self, now): timestamp = datetime_to_utc_timestamp(now) job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp) if job_ids: job_states = self.redis.hmget(self.jobs_key, *job_ids) return self._reconstitute_jobs(six.moves.zip(job_ids, job_states)) return [] def get_next_run_time(self): next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True) if next_run_time: return utc_timestamp_to_datetime(next_run_time[0][1]) def get_all_jobs(self): job_states = self.redis.hgetall(self.jobs_key) jobs = self._reconstitute_jobs(six.iteritems(job_states)) paused_sort_key = datetime(9999, 12, 31, tzinfo=utc) return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key) def add_job(self, job): if self.redis.hexists(self.jobs_key, job.id): raise ConflictingIdError(job.id) with self.redis.pipeline() as pipe: pipe.multi() pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: pipe.zadd(self.run_times_key, {job.id: datetime_to_utc_timestamp(job.next_run_time)}) pipe.execute() def update_job(self, job): if not self.redis.hexists(self.jobs_key, job.id): raise JobLookupError(job.id) with self.redis.pipeline() as pipe: pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol)) if job.next_run_time: pipe.zadd(self.run_times_key, {job.id: datetime_to_utc_timestamp(job.next_run_time)}) else: pipe.zrem(self.run_times_key, job.id) pipe.execute() def remove_job(self, job_id): if not self.redis.hexists(self.jobs_key, job_id): raise JobLookupError(job_id) with self.redis.pipeline() as pipe: pipe.hdel(self.jobs_key, job_id) pipe.zrem(self.run_times_key, job_id) pipe.execute() def 
remove_all_jobs(self): with self.redis.pipeline() as pipe: pipe.delete(self.jobs_key) pipe.delete(self.run_times_key) pipe.execute() def shutdown(self): self.redis.connection_pool.disconnect() def _reconstitute_job(self, job_state): job_state = pickle.loads(job_state) job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler job._jobstore_alias = self._alias return job def _reconstitute_jobs(self, job_states): jobs = [] failed_job_ids = [] for job_id, job_state in job_states: try: jobs.append(self._reconstitute_job(job_state)) except BaseException: self._logger.exception('Unable to restore job "%s" -- removing it', job_id) failed_job_ids.append(job_id) # Remove all the jobs we failed to restore if failed_job_ids: with self.redis.pipeline() as pipe: pipe.hdel(self.jobs_key, *failed_job_ids) pipe.zrem(self.run_times_key, *failed_job_ids) pipe.execute() return jobs def __repr__(self): return '<%s>' % self.__class__.__name__ apscheduler-3.6.3/apscheduler/jobstores/rethinkdb.py000066400000000000000000000133471356022153700226730ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from rethinkdb import RethinkDB except ImportError: # pragma: nocover raise ImportError('RethinkDBJobStore requires rethinkdb installed') class RethinkDBJobStore(BaseJobStore): """ Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to rethinkdb's `RethinkdbClient `_. Plugin alias: ``rethinkdb`` :param str database: database to store jobs in :param str collection: collection to store jobs in :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing connection arguments :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available """ def __init__(self, database='apscheduler', table='jobs', client=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): super(RethinkDBJobStore, self).__init__() if not database: raise ValueError('The "database" parameter must not be empty') if not table: raise ValueError('The "table" parameter must not be empty') self.database = database self.table_name = table self.table = None self.client = client self.pickle_protocol = pickle_protocol self.connect_args = connect_args self.r = RethinkDB() self.conn = None def start(self, scheduler, alias): super(RethinkDBJobStore, self).start(scheduler, alias) if self.client: self.conn = maybe_ref(self.client) else: self.conn = self.r.connect(db=self.database, **self.connect_args) if self.database not in self.r.db_list().run(self.conn): self.r.db_create(self.database).run(self.conn) if self.table_name not in self.r.table_list().run(self.conn): self.r.table_create(self.table_name).run(self.conn) if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn): self.r.table(self.table_name).index_create('next_run_time').run(self.conn) self.table = self.r.db(self.database).table(self.table_name) def lookup_job(self, job_id): results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn)) return self._reconstitute_job(results[0]['job_state']) if results else None def get_due_jobs(self, now): return self._get_jobs(self.r.row['next_run_time'] <= 
datetime_to_utc_timestamp(now)) def get_next_run_time(self): results = list( self.table .filter(self.r.row['next_run_time'] != None) # noqa .order_by(self.r.asc('next_run_time')) .map(lambda x: x['next_run_time']) .limit(1) .run(self.conn) ) return utc_timestamp_to_datetime(results[0]) if results else None def get_all_jobs(self): jobs = self._get_jobs() self._fix_paused_jobs_sorting(jobs) return jobs def add_job(self, job): job_dict = { 'id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } results = self.table.insert(job_dict).run(self.conn) if results['errors'] > 0: raise ConflictingIdError(job.id) def update_job(self, job): changes = { 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)) } results = self.table.get_all(job.id).update(changes).run(self.conn) skipped = False in map(lambda x: results[x] == 0, results.keys()) if results['skipped'] > 0 or results['errors'] > 0 or not skipped: raise JobLookupError(job.id) def remove_job(self, job_id): results = self.table.get_all(job_id).delete().run(self.conn) if results['deleted'] + results['skipped'] != 1: raise JobLookupError(job_id) def remove_all_jobs(self): self.table.delete().run(self.conn) def shutdown(self): self.conn.close() def _reconstitute_job(self, job_state): job_state = pickle.loads(job_state) job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler job._jobstore_alias = self._alias return job def _get_jobs(self, predicate=None): jobs = [] failed_job_ids = [] query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate) # noqa if predicate else self.table) query = query.order_by('next_run_time', 'id').pluck('id', 'job_state') for document in query.run(self.conn): try: jobs.append(self._reconstitute_job(document['job_state'])) except Exception: self._logger.exception('Unable to restore job "%s" -- removing it', document['id']) failed_job_ids.append(document['id']) # Remove all the jobs we failed to restore if failed_job_ids: self.r.expr(failed_job_ids).for_each( lambda job_id: self.table.get_all(job_id).delete()).run(self.conn) return jobs def __repr__(self): connection = self.conn return '<%s (connection=%s)>' % (self.__class__.__name__, connection) apscheduler-3.6.3/apscheduler/jobstores/sqlalchemy.py000066400000000000000000000137521356022153700230630ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from sqlalchemy import ( create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select) from sqlalchemy.exc import IntegrityError from sqlalchemy.sql.expression import null except ImportError: # pragma: nocover raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed') class SQLAlchemyJobStore(BaseJobStore): """ Stores jobs in a database table using SQLAlchemy. The table will be created if it doesn't exist in the database. 
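    A configuration sketch (the SQLite URL below is illustrative)::

        from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

        jobstore = SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
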
Plugin alias: ``sqlalchemy`` :param str url: connection string (see :ref:`SQLAlchemy documentation ` on this) :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a new one based on ``url`` :param str tablename: name of the table to store jobs in :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a new one :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available :param str tableschema: name of the (existing) schema in the target database where the table should be :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine` (ignored if ``engine`` is given) """ def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, tableschema=None, engine_options=None): super(SQLAlchemyJobStore, self).__init__() self.pickle_protocol = pickle_protocol metadata = maybe_ref(metadata) or MetaData() if engine: self.engine = maybe_ref(engine) elif url: self.engine = create_engine(url, **(engine_options or {})) else: raise ValueError('Need either "engine" or "url" defined') # 191 = max key length in MySQL for InnoDB/utf8mb4 tables, # 25 = precision that translates to an 8-byte float self.jobs_t = Table( tablename, metadata, Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True), Column('next_run_time', Float(25), index=True), Column('job_state', LargeBinary, nullable=False), schema=tableschema ) def start(self, scheduler, alias): super(SQLAlchemyJobStore, self).start(scheduler, alias) self.jobs_t.create(self.engine, True) def lookup_job(self, job_id): selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id) job_state = self.engine.execute(selectable).scalar() return self._reconstitute_job(job_state) if job_state else None def get_due_jobs(self, now): timestamp = datetime_to_utc_timestamp(now) return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp) def get_next_run_time(self): selectable = select([self.jobs_t.c.next_run_time]).\ where(self.jobs_t.c.next_run_time != null()).\ order_by(self.jobs_t.c.next_run_time).limit(1) next_run_time = self.engine.execute(selectable).scalar() return utc_timestamp_to_datetime(next_run_time) def get_all_jobs(self): jobs = self._get_jobs() self._fix_paused_jobs_sorting(jobs) return jobs def add_job(self, job): insert = self.jobs_t.insert().values(**{ 'id': job.id, 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) }) try: self.engine.execute(insert) except IntegrityError: raise ConflictingIdError(job.id) def update_job(self, job): update = self.jobs_t.update().values(**{ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time), 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol) }).where(self.jobs_t.c.id == job.id) result = self.engine.execute(update) if result.rowcount == 0: raise JobLookupError(job.id) def remove_job(self, job_id): delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id) result = self.engine.execute(delete) if result.rowcount == 0: raise JobLookupError(job_id) def remove_all_jobs(self): delete = self.jobs_t.delete() self.engine.execute(delete) def shutdown(self): self.engine.dispose() def _reconstitute_job(self, job_state): job_state = pickle.loads(job_state) job_state['jobstore'] = self job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler 
job._jobstore_alias = self._alias return job def _get_jobs(self, *conditions): jobs = [] selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\ order_by(self.jobs_t.c.next_run_time) selectable = selectable.where(*conditions) if conditions else selectable failed_job_ids = set() for row in self.engine.execute(selectable): try: jobs.append(self._reconstitute_job(row.job_state)) except BaseException: self._logger.exception('Unable to restore job "%s" -- removing it', row.id) failed_job_ids.add(row.id) # Remove all the jobs we failed to restore if failed_job_ids: delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids)) self.engine.execute(delete) return jobs def __repr__(self): return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url) apscheduler-3.6.3/apscheduler/jobstores/zookeeper.py000066400000000000000000000144061356022153700227210ustar00rootroot00000000000000from __future__ import absolute_import import os from datetime import datetime from pytz import utc from kazoo.exceptions import NoNodeError, NodeExistsError from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from kazoo.client import KazooClient except ImportError: # pragma: nocover raise ImportError('ZooKeeperJobStore requires Kazoo installed') class ZooKeeperJobStore(BaseJobStore): """ Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to kazoo's `KazooClient `_. Plugin alias: ``zookeeper`` :param str path: path to store jobs in :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of providing connection arguments :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available """ def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): super(ZooKeeperJobStore, self).__init__() self.pickle_protocol = pickle_protocol self.close_connection_on_exit = close_connection_on_exit if not path: raise ValueError('The "path" parameter must not be empty') self.path = path if client: self.client = maybe_ref(client) else: self.client = KazooClient(**connect_args) self._ensured_path = False def _ensure_paths(self): if not self._ensured_path: self.client.ensure_path(self.path) self._ensured_path = True def start(self, scheduler, alias): super(ZooKeeperJobStore, self).start(scheduler, alias) if not self.client.connected: self.client.start() def lookup_job(self, job_id): self._ensure_paths() node_path = os.path.join(self.path, job_id) try: content, _ = self.client.get(node_path) doc = pickle.loads(content) job = self._reconstitute_job(doc['job_state']) return job except BaseException: return None def get_due_jobs(self, now): timestamp = datetime_to_utc_timestamp(now) jobs = [job_def['job'] for job_def in self._get_jobs() if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp] return jobs def get_next_run_time(self): next_runs = [job_def['next_run_time'] for job_def in self._get_jobs() if job_def['next_run_time'] is not None] return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None def get_all_jobs(self): jobs = [job_def['job'] for job_def in self._get_jobs()] self._fix_paused_jobs_sorting(jobs) return jobs def add_job(self, job): 
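        # Implementation note: each job becomes one znode under ``self.path``; the
        # payload is a pickled dict holding 'next_run_time' alongside 'job_state',
        # so due-job queries can read the timestamp without rebuilding the Job.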
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        value = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(value, self.pickle_protocol)
        try:
            self.client.create(node_path, value=data)
        except NodeExistsError:
            raise ConflictingIdError(job.id)

    def update_job(self, job):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job.id))
        changes = {
            'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
            'job_state': job.__getstate__()
        }
        data = pickle.dumps(changes, self.pickle_protocol)
        try:
            self.client.set(node_path, value=data)
        except NoNodeError:
            raise JobLookupError(job.id)

    def remove_job(self, job_id):
        self._ensure_paths()
        node_path = os.path.join(self.path, str(job_id))
        try:
            self.client.delete(node_path)
        except NoNodeError:
            raise JobLookupError(job_id)

    def remove_all_jobs(self):
        try:
            self.client.delete(self.path, recursive=True)
        except NoNodeError:
            pass
        self._ensured_path = False

    def shutdown(self):
        if self.close_connection_on_exit:
            self.client.stop()
            self.client.close()

    def _reconstitute_job(self, job_state):
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

    def _get_jobs(self):
        self._ensure_paths()
        jobs = []
        failed_job_ids = []
        all_ids = self.client.get_children(self.path)
        for node_name in all_ids:
            try:
                node_path = os.path.join(self.path, node_name)
                # The znode's stat record supplies the creation time used below
                content, stat = self.client.get(node_path)
                doc = pickle.loads(content)
                job_def = {
                    'job_id': node_name,
                    'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
                    'job_state': doc['job_state'],
                    'job': self._reconstitute_job(doc['job_state']),
                    'creation_time': stat.ctime
                }
                jobs.append(job_def)
            except BaseException:
                self._logger.exception('Unable to restore job "%s" -- removing it', node_name)
                failed_job_ids.append(node_name)

        # Remove all the jobs we failed to restore
        if failed_job_ids:
            for failed_id in failed_job_ids:
                self.remove_job(failed_id)
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
                                                 job_def['creation_time']))

    def __repr__(self):
        return '<%s (client=%s)>' % (self.__class__.__name__, self.client)

apscheduler-3.6.3/apscheduler/schedulers/__init__.py

class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return 'Scheduler is already running'


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shutdown the scheduler when it's not running."""

    def __str__(self):
        return 'Scheduler is not running'

apscheduler-3.6.3/apscheduler/schedulers/asyncio.py

from __future__ import absolute_import

from functools import wraps, partial

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    import asyncio
except ImportError:  # pragma: nocover
    try:
        import trollius as asyncio
    except ImportError:
        raise ImportError(
            'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')


def run_in_event_loop(func):
    @wraps(func)
    def
wrapper(self, *args, **kwargs): wrapped = partial(func, self, *args, **kwargs) self._eventloop.call_soon_threadsafe(wrapped) return wrapper class AsyncIOScheduler(BaseScheduler): """ A scheduler that runs on an asyncio (:pep:`3156`) event loop. The default executor can run jobs based on native coroutines (``async def``). Extra options: ============== ============================================================= ``event_loop`` AsyncIO event loop to use (defaults to the global event loop) ============== ============================================================= """ _eventloop = None _timeout = None @run_in_event_loop def shutdown(self, wait=True): super(AsyncIOScheduler, self).shutdown(wait) self._stop_timer() def _configure(self, config): self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop() super(AsyncIOScheduler, self)._configure(config) def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup) def _stop_timer(self): if self._timeout: self._timeout.cancel() del self._timeout @run_in_event_loop def wakeup(self): self._stop_timer() wait_seconds = self._process_jobs() self._start_timer(wait_seconds) def _create_default_executor(self): from apscheduler.executors.asyncio import AsyncIOExecutor return AsyncIOExecutor() apscheduler-3.6.3/apscheduler/schedulers/background.py000066400000000000000000000027411356022153700231630ustar00rootroot00000000000000from __future__ import absolute_import from threading import Thread, Event from apscheduler.schedulers.base import BaseScheduler from apscheduler.schedulers.blocking import BlockingScheduler from apscheduler.util import asbool class BackgroundScheduler(BlockingScheduler): """ A scheduler that runs in the background using a separate thread (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately). 
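
    A minimal usage sketch (``tick`` is an illustrative callable)::

        from apscheduler.schedulers.background import BackgroundScheduler

        def tick():
            print('tick')

        scheduler = BackgroundScheduler(daemon=True)
        scheduler.add_job(tick, 'interval', seconds=30)
        scheduler.start()  # returns immediately; the main loop runs in a thread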
Extra options: ========== ============================================================================= ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see `the documentation `_ for further details) ========== ============================================================================= """ _thread = None def _configure(self, config): self._daemon = asbool(config.pop('daemon', True)) super(BackgroundScheduler, self)._configure(config) def start(self, *args, **kwargs): self._event = Event() BaseScheduler.start(self, *args, **kwargs) self._thread = Thread(target=self._main_loop, name='APScheduler') self._thread.daemon = self._daemon self._thread.start() def shutdown(self, *args, **kwargs): super(BackgroundScheduler, self).shutdown(*args, **kwargs) self._thread.join() del self._thread apscheduler-3.6.3/apscheduler/schedulers/base.py000066400000000000000000001235571356022153700217670ustar00rootroot00000000000000from __future__ import print_function from abc import ABCMeta, abstractmethod from threading import RLock from datetime import datetime, timedelta from logging import getLogger import warnings import sys from pkg_resources import iter_entry_points from tzlocal import get_localzone import six from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.job import Job from apscheduler.triggers.base import BaseTrigger from apscheduler.util import ( asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined, TIMEOUT_MAX) from apscheduler.events import ( SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED, EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED, EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED) try: from collections.abc import MutableMapping except ImportError: from collections import MutableMapping #: constant indicating a scheduler's stopped state STATE_STOPPED = 0 #: constant indicating a scheduler's running state (started and processing jobs) STATE_RUNNING = 1 #: constant indicating a scheduler's paused state (started but not processing jobs) STATE_PAUSED = 2 class BaseScheduler(six.with_metaclass(ABCMeta)): """ Abstract base class for all schedulers. 
Takes the following keyword arguments: :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to apscheduler.scheduler) :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone) :param int|float jobstore_retry_interval: the minimum number of seconds to wait between retries in the scheduler's main loop if the job store raises an exception when getting the list of due jobs :param dict job_defaults: default values for newly added jobs :param dict jobstores: a dictionary of job store alias -> job store instance or configuration dict :param dict executors: a dictionary of executor alias -> executor instance or configuration dict :ivar int state: current running state of the scheduler (one of the following constants from ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``) .. seealso:: :ref:`scheduler-config` """ _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers')) _trigger_classes = {} _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors')) _executor_classes = {} _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores')) _jobstore_classes = {} # # Public API # def __init__(self, gconfig={}, **options): super(BaseScheduler, self).__init__() self._executors = {} self._executors_lock = self._create_lock() self._jobstores = {} self._jobstores_lock = self._create_lock() self._listeners = [] self._listeners_lock = self._create_lock() self._pending_jobs = [] self.state = STATE_STOPPED self.configure(gconfig, **options) def configure(self, gconfig={}, prefix='apscheduler.', **options): """ Reconfigures the scheduler with the given options. Can only be done when the scheduler isn't running. :param dict gconfig: a "global" configuration dictionary whose values can be overridden by keyword arguments to this method :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with this string (pass an empty string or ``None`` to use all keys) :raises SchedulerAlreadyRunningError: if the scheduler is already running """ if self.state != STATE_STOPPED: raise SchedulerAlreadyRunningError # If a non-empty prefix was given, strip it from the keys in the # global configuration dict if prefix: prefixlen = len(prefix) gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig) if key.startswith(prefix)) # Create a structure from the dotted options # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}}) config = {} for key, value in six.iteritems(gconfig): parts = key.split('.') parent = config key = parts.pop(0) while parts: parent = parent.setdefault(key, {}) key = parts.pop(0) parent[key] = value # Override any options with explicit keyword arguments config.update(options) self._configure(config) def start(self, paused=False): """ Start the configured executors and job stores and begin processing scheduled jobs. 
:param bool paused: if ``True``, don't start job processing until :meth:`resume` is called :raises SchedulerAlreadyRunningError: if the scheduler is already running :raises RuntimeError: if running under uWSGI with threads disabled """ if self.state != STATE_STOPPED: raise SchedulerAlreadyRunningError self._check_uwsgi() with self._executors_lock: # Create a default executor if nothing else is configured if 'default' not in self._executors: self.add_executor(self._create_default_executor(), 'default') # Start all the executors for alias, executor in six.iteritems(self._executors): executor.start(self, alias) with self._jobstores_lock: # Create a default job store if nothing else is configured if 'default' not in self._jobstores: self.add_jobstore(self._create_default_jobstore(), 'default') # Start all the job stores for alias, store in six.iteritems(self._jobstores): store.start(self, alias) # Schedule all pending jobs for job, jobstore_alias, replace_existing in self._pending_jobs: self._real_add_job(job, jobstore_alias, replace_existing) del self._pending_jobs[:] self.state = STATE_PAUSED if paused else STATE_RUNNING self._logger.info('Scheduler started') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START)) if not paused: self.wakeup() @abstractmethod def shutdown(self, wait=True): """ Shuts down the scheduler, along with its executors and job stores. Does not interrupt any currently running jobs. :param bool wait: ``True`` to wait until all currently executing jobs have finished :raises SchedulerNotRunningError: if the scheduler has not been started yet """ if self.state == STATE_STOPPED: raise SchedulerNotRunningError self.state = STATE_STOPPED # Shut down all executors with self._executors_lock: for executor in six.itervalues(self._executors): executor.shutdown(wait) # Shut down all job stores with self._jobstores_lock: for jobstore in six.itervalues(self._jobstores): jobstore.shutdown() self._logger.info('Scheduler has been shut down') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)) def pause(self): """ Pause job processing in the scheduler. This will prevent the scheduler from waking up to do job processing until :meth:`resume` is called. It will not however stop any already running job processing. """ if self.state == STATE_STOPPED: raise SchedulerNotRunningError elif self.state == STATE_RUNNING: self.state = STATE_PAUSED self._logger.info('Paused scheduler job processing') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED)) def resume(self): """Resume job processing in the scheduler.""" if self.state == STATE_STOPPED: raise SchedulerNotRunningError elif self.state == STATE_PAUSED: self.state = STATE_RUNNING self._logger.info('Resumed scheduler job processing') self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED)) self.wakeup() @property def running(self): """ Return ``True`` if the scheduler has been started. This is a shortcut for ``scheduler.state != STATE_STOPPED``. """ return self.state != STATE_STOPPED def add_executor(self, executor, alias='default', **executor_opts): """ Adds an executor to this scheduler. Any extra keyword arguments will be passed to the executor plugin's constructor, assuming that the first argument is the name of an executor plugin. 
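
        A minimal sketch (``scheduler`` is assumed to be an existing scheduler
        instance)::

            from apscheduler.executors.pool import ProcessPoolExecutor

            scheduler.add_executor(ProcessPoolExecutor(max_workers=4), 'processpool')
            # equivalently, by plugin alias, with options forwarded to the constructor:
            scheduler.add_executor('processpool', alias='processpool2', max_workers=4)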
:param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor instance or the name of an executor plugin :param str|unicode alias: alias for the scheduler :raises ValueError: if there is already an executor by the given alias """ with self._executors_lock: if alias in self._executors: raise ValueError('This scheduler already has an executor by the alias of "%s"' % alias) if isinstance(executor, BaseExecutor): self._executors[alias] = executor elif isinstance(executor, six.string_types): self._executors[alias] = executor = self._create_plugin_instance( 'executor', executor, executor_opts) else: raise TypeError('Expected an executor instance or a string, got %s instead' % executor.__class__.__name__) # Start the executor right away if the scheduler is running if self.state != STATE_STOPPED: executor.start(self, alias) self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias)) def remove_executor(self, alias, shutdown=True): """ Removes the executor by the given alias from this scheduler. :param str|unicode alias: alias of the executor :param bool shutdown: ``True`` to shut down the executor after removing it """ with self._executors_lock: executor = self._lookup_executor(alias) del self._executors[alias] if shutdown: executor.shutdown() self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias)) def add_jobstore(self, jobstore, alias='default', **jobstore_opts): """ Adds a job store to this scheduler. Any extra keyword arguments will be passed to the job store plugin's constructor, assuming that the first argument is the name of a job store plugin. :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added :param str|unicode alias: alias for the job store :raises ValueError: if there is already a job store by the given alias """ with self._jobstores_lock: if alias in self._jobstores: raise ValueError('This scheduler already has a job store by the alias of "%s"' % alias) if isinstance(jobstore, BaseJobStore): self._jobstores[alias] = jobstore elif isinstance(jobstore, six.string_types): self._jobstores[alias] = jobstore = self._create_plugin_instance( 'jobstore', jobstore, jobstore_opts) else: raise TypeError('Expected a job store instance or a string, got %s instead' % jobstore.__class__.__name__) # Start the job store right away if the scheduler isn't stopped if self.state != STATE_STOPPED: jobstore.start(self, alias) # Notify listeners that a new job store has been added self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias)) # Notify the scheduler so it can scan the new job store for jobs if self.state != STATE_STOPPED: self.wakeup() def remove_jobstore(self, alias, shutdown=True): """ Removes the job store by the given alias from this scheduler. :param str|unicode alias: alias of the job store :param bool shutdown: ``True`` to shut down the job store after removing it """ with self._jobstores_lock: jobstore = self._lookup_jobstore(alias) del self._jobstores[alias] if shutdown: jobstore.shutdown() self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias)) def add_listener(self, callback, mask=EVENT_ALL): """ add_listener(callback, mask=EVENT_ALL) Adds a listener for scheduler events. When a matching event occurs, ``callback`` is executed with the event object as its sole argument. If the ``mask`` parameter is not provided, the callback will receive events of all types. :param callback: any callable that takes one argument :param int mask: bitmask that indicates which events should be listened to .. 
seealso:: :mod:`apscheduler.events` .. seealso:: :ref:`scheduler-events` """ with self._listeners_lock: self._listeners.append((callback, mask)) def remove_listener(self, callback): """Removes a previously added event listener.""" with self._listeners_lock: for i, (cb, _) in enumerate(self._listeners): if callback == cb: del self._listeners[i] def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', executor='default', replace_existing=False, **trigger_args): """ add_job(func, trigger=None, args=None, kwargs=None, id=None, \ name=None, misfire_grace_time=undefined, coalesce=undefined, \ max_instances=undefined, next_run_time=undefined, \ jobstore='default', executor='default', \ replace_existing=False, **trigger_args) Adds the given job to the job list and wakes up the scheduler if it's already running. Any option that defaults to ``undefined`` will be replaced with the corresponding default value when the job is scheduled (which happens when the scheduler is started, or immediately if the scheduler is already running). The ``func`` argument can be given either as a callable object or a textual reference in the ``package.module:some.object`` format, where the first half (separated by ``:``) is an importable module and the second half is a reference to the callable object, relative to the module. The ``trigger`` argument can either be: #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case any extra keyword arguments to this method are passed on to the trigger's constructor #. an instance of a trigger class :param func: callable (or a textual reference to one) to run at the given time :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when ``func`` is called :param list|tuple args: list of positional arguments to call func with :param dict kwargs: dict of keyword arguments to call func with :param str|unicode id: explicit identifier for the job (for modifying it later) :param str|unicode name: textual description of the job :param int misfire_grace_time: seconds after the designated runtime that the job is still allowed to be run :param bool coalesce: run once instead of many times if the scheduler determines that the job should be run more than once in succession :param int max_instances: maximum number of concurrently running instances allowed for this job :param datetime next_run_time: when to first run the job, regardless of the trigger (pass ``None`` to add the job as paused) :param str|unicode jobstore: alias of the job store to store the job in :param str|unicode executor: alias of the executor to run the job with :param bool replace_existing: ``True`` to replace an existing job with the same ``id`` (but retain the number of runs from the existing one) :rtype: Job """ job_kwargs = { 'trigger': self._create_trigger(trigger, trigger_args), 'executor': executor, 'func': func, 'args': tuple(args) if args is not None else (), 'kwargs': dict(kwargs) if kwargs is not None else {}, 'id': id, 'name': name, 'misfire_grace_time': misfire_grace_time, 'coalesce': coalesce, 'max_instances': max_instances, 'next_run_time': next_run_time } job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if value is not undefined) job = Job(self, **job_kwargs) # Don't really add jobs to job stores before the scheduler is up and running with self._jobstores_lock: if self.state == 
STATE_STOPPED: self._pending_jobs.append((job, jobstore, replace_existing)) self._logger.info('Adding job tentatively -- it will be properly scheduled when ' 'the scheduler starts') else: self._real_add_job(job, jobstore, replace_existing) return job def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None, misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined, next_run_time=undefined, jobstore='default', executor='default', **trigger_args): """ scheduled_job(trigger, args=None, kwargs=None, id=None, \ name=None, misfire_grace_time=undefined, \ coalesce=undefined, max_instances=undefined, \ next_run_time=undefined, jobstore='default', \ executor='default',**trigger_args) A decorator version of :meth:`add_job`, except that ``replace_existing`` is always ``True``. .. important:: The ``id`` argument must be given if scheduling a job in a persistent job store. The scheduler cannot, however, enforce this requirement. """ def inner(func): self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce, max_instances, next_run_time, jobstore, executor, True, **trigger_args) return func return inner def modify_job(self, job_id, jobstore=None, **changes): """ Modifies the properties of a single job. Modifications are passed to this method as extra keyword arguments. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job: the relevant job instance """ with self._jobstores_lock: job, jobstore = self._lookup_job(job_id, jobstore) job._modify(**changes) if jobstore: self._lookup_jobstore(jobstore).update_job(job) self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore)) # Wake up the scheduler since the job's next run time may have been changed if self.state == STATE_RUNNING: self.wakeup() return job def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args): """ Constructs a new trigger for a job and updates its next run time. Extra keyword arguments are passed directly to the trigger's constructor. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :param trigger: alias of the trigger type or a trigger instance :return Job: the relevant job instance """ trigger = self._create_trigger(trigger, trigger_args) now = datetime.now(self.timezone) next_run_time = trigger.get_next_fire_time(None, now) return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time) def pause_job(self, job_id, jobstore=None): """ Causes the given job not to be executed until it is explicitly resumed. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job: the relevant job instance """ return self.modify_job(job_id, jobstore, next_run_time=None) def resume_job(self, job_id, jobstore=None): """ Resumes the schedule of the given job, or removes the job if its schedule is finished. 
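
        A minimal sketch (``scheduler`` and the ``tick`` callable are assumed)::

            scheduler.add_job(tick, 'interval', minutes=5, id='tick')
            scheduler.pause_job('tick')   # next_run_time becomes None
            scheduler.resume_job('tick')  # recomputed from the trigger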
:param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no next run time could be calculated and the job was removed """ with self._jobstores_lock: job, jobstore = self._lookup_job(job_id, jobstore) now = datetime.now(self.timezone) next_run_time = job.trigger.get_next_fire_time(None, now) if next_run_time: return self.modify_job(job_id, jobstore, next_run_time=next_run_time) else: self.remove_job(job.id, jobstore) def get_jobs(self, jobstore=None, pending=None): """ Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled jobs, either from a specific job store or from all of them. If the scheduler has not been started yet, only pending jobs can be returned because the job stores haven't been started yet either. :param str|unicode jobstore: alias of the job store :param bool pending: **DEPRECATED** :rtype: list[Job] """ if pending is not None: warnings.warn('The "pending" option is deprecated -- get_jobs() always returns ' 'scheduled jobs if the scheduler has been started and pending jobs ' 'otherwise', DeprecationWarning) with self._jobstores_lock: jobs = [] if self.state == STATE_STOPPED: for job, alias, replace_existing in self._pending_jobs: if jobstore is None or alias == jobstore: jobs.append(job) else: for alias, store in six.iteritems(self._jobstores): if jobstore is None or alias == jobstore: jobs.extend(store.get_all_jobs()) return jobs def get_job(self, job_id, jobstore=None): """ Returns the Job that matches the given ``job_id``. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that most likely contains the job :return: the Job by the given ID, or ``None`` if it wasn't found :rtype: Job """ with self._jobstores_lock: try: return self._lookup_job(job_id, jobstore)[0] except JobLookupError: return def remove_job(self, job_id, jobstore=None): """ Removes a job, preventing it from being run any more. :param str|unicode job_id: the identifier of the job :param str|unicode jobstore: alias of the job store that contains the job :raises JobLookupError: if the job was not found """ jobstore_alias = None with self._jobstores_lock: # Check if the job is among the pending jobs if self.state == STATE_STOPPED: for i, (job, alias, replace_existing) in enumerate(self._pending_jobs): if job.id == job_id and jobstore in (None, alias): del self._pending_jobs[i] jobstore_alias = alias break else: # Otherwise, try to remove it from each store until it succeeds or we run out of # stores to check for alias, store in six.iteritems(self._jobstores): if jobstore in (None, alias): try: store.remove_job(job_id) jobstore_alias = alias break except JobLookupError: continue if jobstore_alias is None: raise JobLookupError(job_id) # Notify listeners that a job has been removed event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias) self._dispatch_event(event) self._logger.info('Removed job %s', job_id) def remove_all_jobs(self, jobstore=None): """ Removes all jobs from the specified job store, or all job stores if none is given. 
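
        A minimal sketch (``scheduler`` is assumed)::

            scheduler.remove_all_jobs('default')  # clear one job store
            scheduler.remove_all_jobs()           # clear every job store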
:param str|unicode jobstore: alias of the job store """ with self._jobstores_lock: if self.state == STATE_STOPPED: if jobstore: self._pending_jobs = [pending for pending in self._pending_jobs if pending[1] != jobstore] else: self._pending_jobs = [] else: for alias, store in six.iteritems(self._jobstores): if jobstore in (None, alias): store.remove_all_jobs() self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore)) def print_jobs(self, jobstore=None, out=None): """ print_jobs(jobstore=None, out=sys.stdout) Prints out a textual listing of all jobs currently scheduled on either all job stores or just a specific one. :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is given) """ out = out or sys.stdout with self._jobstores_lock: if self.state == STATE_STOPPED: print(u'Pending jobs:', file=out) if self._pending_jobs: for job, jobstore_alias, replace_existing in self._pending_jobs: if jobstore in (None, jobstore_alias): print(u' %s' % job, file=out) else: print(u' No pending jobs', file=out) else: for alias, store in sorted(six.iteritems(self._jobstores)): if jobstore in (None, alias): print(u'Jobstore %s:' % alias, file=out) jobs = store.get_all_jobs() if jobs: for job in jobs: print(u' %s' % job, file=out) else: print(u' No scheduled jobs', file=out) @abstractmethod def wakeup(self): """ Notifies the scheduler that there may be jobs due for execution. Triggers :meth:`_process_jobs` to be run in an implementation specific manner. """ # # Private API # def _configure(self, config): # Set general options self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler') self.timezone = astimezone(config.pop('timezone', None)) or get_localzone() self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10)) # Set the job defaults job_defaults = config.get('job_defaults', {}) self._job_defaults = { 'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)), 'coalesce': asbool(job_defaults.get('coalesce', True)), 'max_instances': asint(job_defaults.get('max_instances', 1)) } # Configure executors self._executors.clear() for alias, value in six.iteritems(config.get('executors', {})): if isinstance(value, BaseExecutor): self.add_executor(value, alias) elif isinstance(value, MutableMapping): executor_class = value.pop('class', None) plugin = value.pop('type', None) if plugin: executor = self._create_plugin_instance('executor', plugin, value) elif executor_class: cls = maybe_ref(executor_class) executor = cls(**value) else: raise ValueError( 'Cannot create executor "%s" -- either "type" or "class" must be defined' % alias) self.add_executor(executor, alias) else: raise TypeError( "Expected executor instance or dict for executors['%s'], got %s instead" % (alias, value.__class__.__name__)) # Configure job stores self._jobstores.clear() for alias, value in six.iteritems(config.get('jobstores', {})): if isinstance(value, BaseJobStore): self.add_jobstore(value, alias) elif isinstance(value, MutableMapping): jobstore_class = value.pop('class', None) plugin = value.pop('type', None) if plugin: jobstore = self._create_plugin_instance('jobstore', plugin, value) elif jobstore_class: cls = maybe_ref(jobstore_class) jobstore = cls(**value) else: raise ValueError( 'Cannot create job store "%s" -- either "type" or "class" must be ' 'defined' % alias) self.add_jobstore(jobstore, alias) else: raise TypeError( "Expected job 
store instance or dict for jobstores['%s'], got %s instead" % (alias, value.__class__.__name__))

    def _create_default_executor(self):
        """Creates a default executor, specific to the particular scheduler type."""
        return ThreadPoolExecutor()

    def _create_default_jobstore(self):
        """Creates a default job store, specific to the particular scheduler type."""
        return MemoryJobStore()

    def _lookup_executor(self, alias):
        """
        Returns the executor instance by the given name from the list of executors that were added
        to this scheduler.

        :type alias: str
        :raises KeyError: if no executor by the given alias exists

        """
        try:
            return self._executors[alias]
        except KeyError:
            raise KeyError('No such executor: %s' % alias)

    def _lookup_jobstore(self, alias):
        """
        Returns the job store instance by the given name from the list of job stores that were
        added to this scheduler.

        :type alias: str
        :raises KeyError: if no job store by the given alias exists

        """
        try:
            return self._jobstores[alias]
        except KeyError:
            raise KeyError('No such job store: %s' % alias)

    def _lookup_job(self, job_id, jobstore_alias):
        """
        Finds a job by its ID.

        :type job_id: str
        :param str jobstore_alias: alias of a job store to look in
        :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of
            a pending job)
        :raises JobLookupError: if no job by the given ID is found.

        """
        if self.state == STATE_STOPPED:
            # Check if the job is among the pending jobs
            for job, alias, replace_existing in self._pending_jobs:
                if job.id == job_id:
                    return job, None
        else:
            # Look in all job stores
            for alias, store in six.iteritems(self._jobstores):
                if jobstore_alias in (None, alias):
                    job = store.lookup_job(job_id)
                    if job is not None:
                        return job, alias

        raise JobLookupError(job_id)

    def _dispatch_event(self, event):
        """
        Dispatches the given event to interested listeners.

        :param SchedulerEvent event: the event to send

        """
        with self._listeners_lock:
            listeners = tuple(self._listeners)

        for cb, mask in listeners:
            if event.code & mask:
                try:
                    cb(event)
                except BaseException:
                    self._logger.exception('Error notifying listener')

    def _check_uwsgi(self):
        """Check if we're running under uWSGI with threads disabled."""
        uwsgi_module = sys.modules.get('uwsgi')
        if not getattr(uwsgi_module, 'has_threads', True):
            raise RuntimeError('The scheduler seems to be running under uWSGI, but threads have '
                               'been disabled.
You must run uWSGI with the --enable-threads ' 'option for the scheduler to work.') def _real_add_job(self, job, jobstore_alias, replace_existing): """ :param Job job: the job to add :param bool replace_existing: ``True`` to use update_job() in case the job already exists in the store """ # Fill in undefined values with defaults replacements = {} for key, value in six.iteritems(self._job_defaults): if not hasattr(job, key): replacements[key] = value # Calculate the next run time if there is none defined if not hasattr(job, 'next_run_time'): now = datetime.now(self.timezone) replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now) # Apply any replacements job._modify(**replacements) # Add the job to the given job store store = self._lookup_jobstore(jobstore_alias) try: store.add_job(job) except ConflictingIdError: if replace_existing: store.update_job(job) else: raise # Mark the job as no longer pending job._jobstore_alias = jobstore_alias # Notify listeners that a new job has been added event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias) self._dispatch_event(event) self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias) # Notify the scheduler about the new job if self.state == STATE_RUNNING: self.wakeup() def _create_plugin_instance(self, type_, alias, constructor_kwargs): """Creates an instance of the given plugin type, loading the plugin first if necessary.""" plugin_container, class_container, base_class = { 'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger), 'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore), 'executor': (self._executor_plugins, self._executor_classes, BaseExecutor) }[type_] try: plugin_cls = class_container[alias] except KeyError: if alias in plugin_container: plugin_cls = class_container[alias] = plugin_container[alias].load() if not issubclass(plugin_cls, base_class): raise TypeError('The {0} entry point does not point to a {0} class'. format(type_)) else: raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias)) return plugin_cls(**constructor_kwargs) def _create_trigger(self, trigger, trigger_args): if isinstance(trigger, BaseTrigger): return trigger elif trigger is None: trigger = 'date' elif not isinstance(trigger, six.string_types): raise TypeError('Expected a trigger instance or string, got %s instead' % trigger.__class__.__name__) # Use the scheduler's time zone if nothing else is specified trigger_args.setdefault('timezone', self.timezone) # Instantiate the trigger class return self._create_plugin_instance('trigger', trigger, trigger_args) def _create_lock(self): """Creates a reentrant lock object.""" return RLock() def _process_jobs(self): """ Iterates through jobs in every jobstore, starts jobs that are due and figures out how long to wait for the next round. If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least ``jobstore_retry_interval`` seconds. 
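        Returns the number of seconds to wait until this method should be called again,
        or ``None`` if the scheduler should sleep until it is explicitly woken up.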
""" if self.state == STATE_PAUSED: self._logger.debug('Scheduler is paused -- not processing jobs') return None self._logger.debug('Looking for jobs to run') now = datetime.now(self.timezone) next_wakeup_time = None events = [] with self._jobstores_lock: for jobstore_alias, jobstore in six.iteritems(self._jobstores): try: due_jobs = jobstore.get_due_jobs(now) except Exception as e: # Schedule a wakeup at least in jobstore_retry_interval seconds self._logger.warning('Error getting due jobs from job store %r: %s', jobstore_alias, e) retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval) if not next_wakeup_time or next_wakeup_time > retry_wakeup_time: next_wakeup_time = retry_wakeup_time continue for job in due_jobs: # Look up the job's executor try: executor = self._lookup_executor(job.executor) except BaseException: self._logger.error( 'Executor lookup ("%s") failed for job "%s" -- removing it from the ' 'job store', job.executor, job) self.remove_job(job.id, jobstore_alias) continue run_times = job._get_run_times(now) run_times = run_times[-1:] if run_times and job.coalesce else run_times if run_times: try: executor.submit_job(job, run_times) except MaxInstancesReachedError: self._logger.warning( 'Execution of job "%s" skipped: maximum number of running ' 'instances reached (%d)', job, job.max_instances) event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id, jobstore_alias, run_times) events.append(event) except BaseException: self._logger.exception('Error submitting job "%s" to executor "%s"', job, job.executor) else: event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias, run_times) events.append(event) # Update the job if it has a next execution time. # Otherwise remove it from the job store. job_next_run = job.trigger.get_next_fire_time(run_times[-1], now) if job_next_run: job._modify(next_run_time=job_next_run) jobstore.update_job(job) else: self.remove_job(job.id, jobstore_alias) # Set a new next wakeup time if there isn't one yet or # the jobstore has an even earlier one jobstore_next_run_time = jobstore.get_next_run_time() if jobstore_next_run_time and (next_wakeup_time is None or jobstore_next_run_time < next_wakeup_time): next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone) # Dispatch collected events for event in events: self._dispatch_event(event) # Determine the delay until this method should be called again if self.state == STATE_PAUSED: wait_seconds = None self._logger.debug('Scheduler is paused; waiting until resume() is called') elif next_wakeup_time is None: wait_seconds = None self._logger.debug('No jobs; waiting until a job is added') else: wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX) self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time, wait_seconds) return wait_seconds apscheduler-3.6.3/apscheduler/schedulers/blocking.py000066400000000000000000000016341356022153700226340ustar00rootroot00000000000000from __future__ import absolute_import from threading import Event from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED from apscheduler.util import TIMEOUT_MAX class BlockingScheduler(BaseScheduler): """ A scheduler that runs in the foreground (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block). 
""" _event = None def start(self, *args, **kwargs): self._event = Event() super(BlockingScheduler, self).start(*args, **kwargs) self._main_loop() def shutdown(self, wait=True): super(BlockingScheduler, self).shutdown(wait) self._event.set() def _main_loop(self): wait_seconds = TIMEOUT_MAX while self.state != STATE_STOPPED: self._event.wait(wait_seconds) self._event.clear() wait_seconds = self._process_jobs() def wakeup(self): self._event.set() apscheduler-3.6.3/apscheduler/schedulers/gevent.py000066400000000000000000000020071356022153700223270ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.schedulers.blocking import BlockingScheduler from apscheduler.schedulers.base import BaseScheduler try: from gevent.event import Event from gevent.lock import RLock import gevent except ImportError: # pragma: nocover raise ImportError('GeventScheduler requires gevent installed') class GeventScheduler(BlockingScheduler): """A scheduler that runs as a Gevent greenlet.""" _greenlet = None def start(self, *args, **kwargs): self._event = Event() BaseScheduler.start(self, *args, **kwargs) self._greenlet = gevent.spawn(self._main_loop) return self._greenlet def shutdown(self, *args, **kwargs): super(GeventScheduler, self).shutdown(*args, **kwargs) self._greenlet.join() del self._greenlet def _create_lock(self): return RLock() def _create_default_executor(self): from apscheduler.executors.gevent import GeventExecutor return GeventExecutor() apscheduler-3.6.3/apscheduler/schedulers/qt.py000066400000000000000000000024211356022153700214630ustar00rootroot00000000000000from __future__ import absolute_import from apscheduler.schedulers.base import BaseScheduler try: from PyQt5.QtCore import QObject, QTimer except (ImportError, RuntimeError): # pragma: nocover try: from PyQt4.QtCore import QObject, QTimer except ImportError: try: from PySide.QtCore import QObject, QTimer # noqa except ImportError: raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed') class QtScheduler(BaseScheduler): """A scheduler that runs in a Qt event loop.""" _timer = None def shutdown(self, *args, **kwargs): super(QtScheduler, self).shutdown(*args, **kwargs) self._stop_timer() def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: wait_time = min(wait_seconds * 1000, 2147483647) self._timer = QTimer.singleShot(wait_time, self._process_jobs) def _stop_timer(self): if self._timer: if self._timer.isActive(): self._timer.stop() del self._timer def wakeup(self): self._start_timer(0) def _process_jobs(self): wait_seconds = super(QtScheduler, self)._process_jobs() self._start_timer(wait_seconds) apscheduler-3.6.3/apscheduler/schedulers/tornado.py000066400000000000000000000036061356022153700225130ustar00rootroot00000000000000from __future__ import absolute_import from datetime import timedelta from functools import wraps from apscheduler.schedulers.base import BaseScheduler from apscheduler.util import maybe_ref try: from tornado.ioloop import IOLoop except ImportError: # pragma: nocover raise ImportError('TornadoScheduler requires tornado installed') def run_in_ioloop(func): @wraps(func) def wrapper(self, *args, **kwargs): self._ioloop.add_callback(func, self, *args, **kwargs) return wrapper class TornadoScheduler(BaseScheduler): """ A scheduler that runs on a Tornado IOLoop. The default executor can run jobs based on native coroutines (``async def``). 
=========== =============================================================== ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop) =========== =============================================================== """ _ioloop = None _timeout = None @run_in_ioloop def shutdown(self, wait=True): super(TornadoScheduler, self).shutdown(wait) self._stop_timer() def _configure(self, config): self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current() super(TornadoScheduler, self)._configure(config) def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup) def _stop_timer(self): if self._timeout: self._ioloop.remove_timeout(self._timeout) del self._timeout def _create_default_executor(self): from apscheduler.executors.tornado import TornadoExecutor return TornadoExecutor() @run_in_ioloop def wakeup(self): self._stop_timer() wait_seconds = self._process_jobs() self._start_timer(wait_seconds) apscheduler-3.6.3/apscheduler/schedulers/twisted.py000066400000000000000000000034641356022153700225320ustar00rootroot00000000000000from __future__ import absolute_import from functools import wraps from apscheduler.schedulers.base import BaseScheduler from apscheduler.util import maybe_ref try: from twisted.internet import reactor as default_reactor except ImportError: # pragma: nocover raise ImportError('TwistedScheduler requires Twisted installed') def run_in_reactor(func): @wraps(func) def wrapper(self, *args, **kwargs): self._reactor.callFromThread(func, self, *args, **kwargs) return wrapper class TwistedScheduler(BaseScheduler): """ A scheduler that runs on a Twisted reactor. Extra options: =========== ======================================================== ``reactor`` Reactor instance to use (defaults to the global reactor) =========== ======================================================== """ _reactor = None _delayedcall = None def _configure(self, config): self._reactor = maybe_ref(config.pop('reactor', default_reactor)) super(TwistedScheduler, self)._configure(config) @run_in_reactor def shutdown(self, wait=True): super(TwistedScheduler, self).shutdown(wait) self._stop_timer() def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup) def _stop_timer(self): if self._delayedcall and self._delayedcall.active(): self._delayedcall.cancel() del self._delayedcall @run_in_reactor def wakeup(self): self._stop_timer() wait_seconds = self._process_jobs() self._start_timer(wait_seconds) def _create_default_executor(self): from apscheduler.executors.twisted import TwistedExecutor return TwistedExecutor() apscheduler-3.6.3/apscheduler/triggers/000077500000000000000000000000001356022153700201535ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/triggers/__init__.py000066400000000000000000000000001356022153700222520ustar00rootroot00000000000000apscheduler-3.6.3/apscheduler/triggers/base.py000066400000000000000000000034551356022153700214460ustar00rootroot00000000000000from abc import ABCMeta, abstractmethod from datetime import timedelta import random import six class BaseTrigger(six.with_metaclass(ABCMeta)): """Abstract base class that defines the interface that every trigger must implement.""" __slots__ = () @abstractmethod def get_next_fire_time(self, previous_fire_time, now): """ Returns the next datetime to fire on, If no such datetime can be calculated, returns 
``None``. :param datetime.datetime previous_fire_time: the previous time the trigger was fired :param datetime.datetime now: current datetime """ def _apply_jitter(self, next_fire_time, jitter, now): """ Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter. ``next_fire_time - jitter <= result <= next_fire_time + jitter`` :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If ``None``, returns ``None``. :param int|None jitter: maximum number of seconds to add or subtract to ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time`` :param datetime.datetime now: current datetime :return datetime.datetime|None: next fire time with a jitter. """ if next_fire_time is None or not jitter: return next_fire_time next_fire_time_with_jitter = next_fire_time + timedelta( seconds=random.uniform(-jitter, jitter)) if next_fire_time_with_jitter < now: # Next fire time with jitter is in the past. # Ignore jitter to avoid false misfire. return next_fire_time return next_fire_time_with_jitter apscheduler-3.6.3/apscheduler/triggers/combining.py000066400000000000000000000066211356022153700224770ustar00rootroot00000000000000from apscheduler.triggers.base import BaseTrigger from apscheduler.util import obj_to_ref, ref_to_obj class BaseCombiningTrigger(BaseTrigger): __slots__ = ('triggers', 'jitter') def __init__(self, triggers, jitter=None): self.triggers = triggers self.jitter = jitter def __getstate__(self): return { 'version': 1, 'triggers': [(obj_to_ref(trigger.__class__), trigger.__getstate__()) for trigger in self.triggers], 'jitter': self.jitter } def __setstate__(self, state): if state.get('version', 1) > 1: raise ValueError( 'Got serialized data for version %s of %s, but only versions up to 1 can be ' 'handled' % (state['version'], self.__class__.__name__)) self.jitter = state['jitter'] self.triggers = [] for clsref, state in state['triggers']: cls = ref_to_obj(clsref) trigger = cls.__new__(cls) trigger.__setstate__(state) self.triggers.append(trigger) def __repr__(self): return '<{}({}{})>'.format(self.__class__.__name__, self.triggers, ', jitter={}'.format(self.jitter) if self.jitter else '') class AndTrigger(BaseCombiningTrigger): """ Always returns the earliest next fire time that all the given triggers can agree on. The trigger is considered to be finished when any of the given triggers has finished its schedule. Trigger alias: ``and`` :param list triggers: triggers to combine :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. """ __slots__ = () def get_next_fire_time(self, previous_fire_time, now): while True: fire_times = [trigger.get_next_fire_time(previous_fire_time, now) for trigger in self.triggers] if None in fire_times: return None elif min(fire_times) == max(fire_times): return self._apply_jitter(fire_times[0], self.jitter, now) else: now = max(fire_times) def __str__(self): return 'and[{}]'.format(', '.join(str(trigger) for trigger in self.triggers)) class OrTrigger(BaseCombiningTrigger): """ Always returns the earliest next fire time produced by any of the given triggers. The trigger is considered finished when all the given triggers have finished their schedules. Trigger alias: ``or`` :param list triggers: triggers to combine :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. .. 
note:: Triggers that depend on the previous fire time, such as the interval trigger,
        may seem to behave strangely since they are always passed the previous fire time produced
        by any of the given triggers.

    """

    __slots__ = ()

    def get_next_fire_time(self, previous_fire_time, now):
        fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
                      for trigger in self.triggers]
        fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
        if fire_times:
            return self._apply_jitter(min(fire_times), self.jitter, now)
        else:
            return None

    def __str__(self):
        return 'or[{}]'.format(', '.join(str(trigger) for trigger in self.triggers))

apscheduler-3.6.3/apscheduler/triggers/cron/__init__.py

from datetime import datetime, timedelta

from tzlocal import get_localzone
import six

from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import (
    BaseField, MonthField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES)
from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone


class CronTrigger(BaseTrigger):
    """
    Triggers when current time matches all specified time constraints, similarly to how the UNIX
    cron scheduler works.

    :param int|str year: 4-digit year
    :param int|str month: month (1-12)
    :param int|str day: day of the month (1-31)
    :param int|str week: ISO week (1-53)
    :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
    :param int|str hour: hour (0-23)
    :param int|str minute: minute (0-59)
    :param int|str second: second (0-59)
    :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
    :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
    :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
        to scheduler timezone)
    :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.

    .. note:: The first weekday is always **monday**.
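
    A minimal sketch (``scheduler`` and ``tick`` are assumed to exist)::

        from apscheduler.triggers.cron import CronTrigger

        trigger = CronTrigger(day_of_week='mon-fri', hour=17, minute=30)
        scheduler.add_job(tick, trigger)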
""" FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second') FIELDS_MAP = { 'year': BaseField, 'month': MonthField, 'week': WeekField, 'day': DayOfMonthField, 'day_of_week': DayOfWeekField, 'hour': BaseField, 'minute': BaseField, 'second': BaseField } __slots__ = 'timezone', 'start_date', 'end_date', 'fields', 'jitter' def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None, minute=None, second=None, start_date=None, end_date=None, timezone=None, jitter=None): if timezone: self.timezone = astimezone(timezone) elif isinstance(start_date, datetime) and start_date.tzinfo: self.timezone = start_date.tzinfo elif isinstance(end_date, datetime) and end_date.tzinfo: self.timezone = end_date.tzinfo else: self.timezone = get_localzone() self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') self.jitter = jitter values = dict((key, value) for (key, value) in six.iteritems(locals()) if key in self.FIELD_NAMES and value is not None) self.fields = [] assign_defaults = False for field_name in self.FIELD_NAMES: if field_name in values: exprs = values.pop(field_name) is_default = False assign_defaults = not values elif assign_defaults: exprs = DEFAULT_VALUES[field_name] is_default = True else: exprs = '*' is_default = True field_class = self.FIELDS_MAP[field_name] field = field_class(field_name, exprs, is_default) self.fields.append(field) @classmethod def from_crontab(cls, expr, timezone=None): """ Create a :class:`~CronTrigger` from a standard crontab expression. See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here. :param expr: minute, hour, day of month, month, day of week :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations ( defaults to scheduler timezone) :return: a :class:`~CronTrigger` instance """ values = expr.split() if len(values) != 5: raise ValueError('Wrong number of fields; got {}, expected 5'.format(len(values))) return cls(minute=values[0], hour=values[1], day=values[2], month=values[3], day_of_week=values[4], timezone=timezone) def _increment_field_value(self, dateval, fieldnum): """ Increments the designated field and resets all less significant fields to their minimum values. 
:type dateval: datetime :type fieldnum: int :return: a tuple containing the new date, and the number of the field that was actually incremented :rtype: tuple """ values = {} i = 0 while i < len(self.fields): field = self.fields[i] if not field.REAL: if i == fieldnum: fieldnum -= 1 i -= 1 else: i += 1 continue if i < fieldnum: values[field.name] = field.get_value(dateval) i += 1 elif i > fieldnum: values[field.name] = field.get_min(dateval) i += 1 else: value = field.get_value(dateval) maxval = field.get_max(dateval) if value == maxval: fieldnum -= 1 i -= 1 else: values[field.name] = value + 1 i += 1 difference = datetime(**values) - dateval.replace(tzinfo=None) return self.timezone.normalize(dateval + difference), fieldnum def _set_field_value(self, dateval, fieldnum, new_value): values = {} for i, field in enumerate(self.fields): if field.REAL: if i < fieldnum: values[field.name] = field.get_value(dateval) elif i > fieldnum: values[field.name] = field.get_min(dateval) else: values[field.name] = new_value return self.timezone.localize(datetime(**values)) def get_next_fire_time(self, previous_fire_time, now): if previous_fire_time: start_date = min(now, previous_fire_time + timedelta(microseconds=1)) if start_date == previous_fire_time: start_date += timedelta(microseconds=1) else: start_date = max(now, self.start_date) if self.start_date else now fieldnum = 0 next_date = datetime_ceil(start_date).astimezone(self.timezone) while 0 <= fieldnum < len(self.fields): field = self.fields[fieldnum] curr_value = field.get_value(next_date) next_value = field.get_next_value(next_date) if next_value is None: # No valid value was found next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1) elif next_value > curr_value: # A valid, but higher than the starting value, was found if field.REAL: next_date = self._set_field_value(next_date, fieldnum, next_value) fieldnum += 1 else: next_date, fieldnum = self._increment_field_value(next_date, fieldnum) else: # A valid value was found, no changes necessary fieldnum += 1 # Return if the date has rolled past the end date if self.end_date and next_date > self.end_date: return None if fieldnum >= 0: next_date = self._apply_jitter(next_date, self.jitter, now) return min(next_date, self.end_date) if self.end_date else next_date def __getstate__(self): return { 'version': 2, 'timezone': self.timezone, 'start_date': self.start_date, 'end_date': self.end_date, 'fields': self.fields, 'jitter': self.jitter, } def __setstate__(self, state): # This is for compatibility with APScheduler 3.0.x if isinstance(state, tuple): state = state[1] if state.get('version', 1) > 2: raise ValueError( 'Got serialized data for version %s of %s, but only versions up to 2 can be ' 'handled' % (state['version'], self.__class__.__name__)) self.timezone = state['timezone'] self.start_date = state['start_date'] self.end_date = state['end_date'] self.fields = state['fields'] self.jitter = state.get('jitter') def __str__(self): options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] return 'cron[%s]' % (', '.join(options)) def __repr__(self): options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default] if self.start_date: options.append("start_date=%r" % datetime_repr(self.start_date)) if self.end_date: options.append("end_date=%r" % datetime_repr(self.end_date)) if self.jitter: options.append('jitter=%s' % self.jitter) return "<%s (%s, timezone='%s')>" % ( self.__class__.__name__, ', '.join(options), self.timezone) 
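# --- Editorial usage sketch (not part of the original module): the crontab
# shorthand accepted by from_crontab() maps onto the keyword form of the
# constructor; the schedule below is an arbitrary example. ---
from apscheduler.triggers.cron import CronTrigger

trigger_a = CronTrigger.from_crontab('0 12 * * mon-fri')
trigger_b = CronTrigger(minute='0', hour='12', day='*', month='*', day_of_week='mon-fri')
assert str(trigger_a) == str(trigger_b)  # both render as the same cron[...] expression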
apscheduler-3.6.3/apscheduler/triggers/cron/expressions.py000066400000000000000000000217401356022153700240540ustar00rootroot00000000000000"""This module contains the expressions applicable for CronTrigger's fields.""" from calendar import monthrange import re from apscheduler.util import asint __all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression', 'WeekdayPositionExpression', 'LastDayOfMonthExpression') WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'] class AllExpression(object): value_re = re.compile(r'\*(?:/(?P<step>\d+))?$') def __init__(self, step=None): self.step = asint(step) if self.step == 0: raise ValueError('Increment must be higher than 0') def validate_range(self, field_name): from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name] if self.step and self.step > value_range: raise ValueError('the step value ({}) is higher than the total range of the ' 'expression ({})'.format(self.step, value_range)) def get_next_value(self, date, field): start = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) start = max(start, minval) if not self.step: next = start else: distance_to_next = (self.step - (start - minval)) % self.step next = start + distance_to_next if next <= maxval: return next def __eq__(self, other): return isinstance(other, self.__class__) and self.step == other.step def __str__(self): if self.step: return '*/%d' % self.step return '*' def __repr__(self): return "%s(%s)" % (self.__class__.__name__, self.step) class RangeExpression(AllExpression): value_re = re.compile( r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$') def __init__(self, first, last=None, step=None): super(RangeExpression, self).__init__(step) first = asint(first) last = asint(last) if last is None and step is None: last = first if last is not None and first > last: raise ValueError('The minimum value in a range must not be higher than the maximum') self.first = first self.last = last def validate_range(self, field_name): from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES super(RangeExpression, self).validate_range(field_name) if self.first < MIN_VALUES[field_name]: raise ValueError('the first value ({}) is lower than the minimum value ({})' .format(self.first, MIN_VALUES[field_name])) if self.last is not None and self.last > MAX_VALUES[field_name]: raise ValueError('the last value ({}) is higher than the maximum value ({})' .format(self.last, MAX_VALUES[field_name])) value_range = (self.last or MAX_VALUES[field_name]) - self.first if self.step and self.step > value_range: raise ValueError('the step value ({}) is higher than the total range of the ' 'expression ({})'.format(self.step, value_range)) def get_next_value(self, date, field): startval = field.get_value(date) minval = field.get_min(date) maxval = field.get_max(date) # Apply range limits minval = max(minval, self.first) maxval = min(maxval, self.last) if self.last is not None else maxval nextval = max(minval, startval) # Apply the step if defined if self.step: distance_to_next = (self.step - (nextval - minval)) % self.step nextval += distance_to_next return nextval if nextval <= maxval else None def __eq__(self, other): return (isinstance(other, self.__class__) and self.first == other.first and self.last == other.last) def __str__(self): if self.last != self.first and self.last is not None: range = '%d-%d' % (self.first,
self.last) else: range = str(self.first) if self.step: return '%s/%d' % (range, self.step) return range def __repr__(self): args = [str(self.first)] if self.last != self.first and self.last is not None or self.step: args.append(str(self.last)) if self.step: args.append(str(self.step)) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class MonthRangeExpression(RangeExpression): value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) def __init__(self, first, last=None): try: first_num = MONTHS.index(first.lower()) + 1 except ValueError: raise ValueError('Invalid month name "%s"' % first) if last: try: last_num = MONTHS.index(last.lower()) + 1 except ValueError: raise ValueError('Invalid month name "%s"' % last) else: last_num = None super(MonthRangeExpression, self).__init__(first_num, last_num) def __str__(self): if self.last != self.first and self.last is not None: return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1]) return MONTHS[self.first - 1] def __repr__(self): args = ["'%s'" % MONTHS[self.first - 1]] if self.last != self.first and self.last is not None: args.append("'%s'" % MONTHS[self.last - 1]) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayRangeExpression(RangeExpression): value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE) def __init__(self, first, last=None): try: first_num = WEEKDAYS.index(first.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % first) if last: try: last_num = WEEKDAYS.index(last.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % last) else: last_num = None super(WeekdayRangeExpression, self).__init__(first_num, last_num) def __str__(self): if self.last != self.first and self.last is not None: return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last]) return WEEKDAYS[self.first] def __repr__(self): args = ["'%s'" % WEEKDAYS[self.first]] if self.last != self.first and self.last is not None: args.append("'%s'" % WEEKDAYS[self.last]) return "%s(%s)" % (self.__class__.__name__, ', '.join(args)) class WeekdayPositionExpression(AllExpression): options = ['1st', '2nd', '3rd', '4th', '5th', 'last'] value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' % '|'.join(options), re.IGNORECASE) def __init__(self, option_name, weekday_name): super(WeekdayPositionExpression, self).__init__(None) try: self.option_num = self.options.index(option_name.lower()) except ValueError: raise ValueError('Invalid weekday position "%s"' % option_name) try: self.weekday = WEEKDAYS.index(weekday_name.lower()) except ValueError: raise ValueError('Invalid weekday name "%s"' % weekday_name) def get_next_value(self, date, field): # Figure out the weekday of the month's first day and the number of days in that month first_day_wday, last_day = monthrange(date.year, date.month) # Calculate which day of the month is the first of the target weekdays first_hit_day = self.weekday - first_day_wday + 1 if first_hit_day <= 0: first_hit_day += 7 # Calculate what day of the month the target weekday would be if self.option_num < 5: target_day = first_hit_day + self.option_num * 7 else: target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7 if target_day <= last_day and target_day >= date.day: return target_day def __eq__(self, other): return (super(WeekdayPositionExpression, self).__eq__(other) and self.option_num == other.option_num and self.weekday == other.weekday) def __str__(self): return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday]) def __repr__(self):
return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num], WEEKDAYS[self.weekday]) class LastDayOfMonthExpression(AllExpression): value_re = re.compile(r'last', re.IGNORECASE) def __init__(self): super(LastDayOfMonthExpression, self).__init__(None) def get_next_value(self, date, field): return monthrange(date.year, date.month)[1] def __str__(self): return 'last' def __repr__(self): return "%s()" % self.__class__.__name__ apscheduler-3.6.3/apscheduler/triggers/cron/fields.py000066400000000000000000000066661356022153700227520ustar00rootroot00000000000000"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields.""" from calendar import monthrange import re import six from apscheduler.triggers.cron.expressions import ( AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression, WeekdayRangeExpression, MonthRangeExpression) __all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField', 'DayOfMonthField', 'DayOfWeekField') MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0, 'minute': 0, 'second': 0} MAX_VALUES = {'year': 9999, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23, 'minute': 59, 'second': 59} DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0, 'minute': 0, 'second': 0} SEPARATOR = re.compile(' *, *') class BaseField(object): REAL = True COMPILERS = [AllExpression, RangeExpression] def __init__(self, name, exprs, is_default=False): self.name = name self.is_default = is_default self.compile_expressions(exprs) def get_min(self, dateval): return MIN_VALUES[self.name] def get_max(self, dateval): return MAX_VALUES[self.name] def get_value(self, dateval): return getattr(dateval, self.name) def get_next_value(self, dateval): smallest = None for expr in self.expressions: value = expr.get_next_value(dateval, self) if smallest is None or (value is not None and value < smallest): smallest = value return smallest def compile_expressions(self, exprs): self.expressions = [] # Split a comma-separated expression list, if any for expr in SEPARATOR.split(str(exprs).strip()): self.compile_expression(expr) def compile_expression(self, expr): for compiler in self.COMPILERS: match = compiler.value_re.match(expr) if match: compiled_expr = compiler(**match.groupdict()) try: compiled_expr.validate_range(self.name) except ValueError as e: exc = ValueError('Error validating expression {!r}: {}'.format(expr, e)) six.raise_from(exc, None) self.expressions.append(compiled_expr) return raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name)) def __eq__(self, other): return isinstance(self, self.__class__) and self.expressions == other.expressions def __str__(self): expr_strings = (str(e) for e in self.expressions) return ','.join(expr_strings) def __repr__(self): return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self) class WeekField(BaseField): REAL = False def get_value(self, dateval): return dateval.isocalendar()[1] class DayOfMonthField(BaseField): COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression] def get_max(self, dateval): return monthrange(dateval.year, dateval.month)[1] class DayOfWeekField(BaseField): REAL = False COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression] def get_value(self, dateval): return dateval.weekday() class MonthField(BaseField): COMPILERS = BaseField.COMPILERS + [MonthRangeExpression] 
apscheduler-3.6.3/apscheduler/triggers/date.py000066400000000000000000000032511356022153700214430ustar00rootroot00000000000000from datetime import datetime from tzlocal import get_localzone from apscheduler.triggers.base import BaseTrigger from apscheduler.util import convert_to_datetime, datetime_repr, astimezone class DateTrigger(BaseTrigger): """ Triggers once on the given datetime. If ``run_date`` is left empty, current time is used. :param datetime|str run_date: the date/time to run the job at :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already """ __slots__ = 'run_date' def __init__(self, run_date=None, timezone=None): timezone = astimezone(timezone) or get_localzone() if run_date is not None: self.run_date = convert_to_datetime(run_date, timezone, 'run_date') else: self.run_date = datetime.now(timezone) def get_next_fire_time(self, previous_fire_time, now): return self.run_date if previous_fire_time is None else None def __getstate__(self): return { 'version': 1, 'run_date': self.run_date } def __setstate__(self, state): # This is for compatibility with APScheduler 3.0.x if isinstance(state, tuple): state = state[1] if state.get('version', 1) > 1: raise ValueError( 'Got serialized data for version %s of %s, but only version 1 can be handled' % (state['version'], self.__class__.__name__)) self.run_date = state['run_date'] def __str__(self): return 'date[%s]' % datetime_repr(self.run_date) def __repr__(self): return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date)) apscheduler-3.6.3/apscheduler/triggers/interval.py000066400000000000000000000104351356022153700223540ustar00rootroot00000000000000from datetime import timedelta, datetime from math import ceil from tzlocal import get_localzone from apscheduler.triggers.base import BaseTrigger from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone class IntervalTrigger(BaseTrigger): """ Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` + interval otherwise. :param int weeks: number of weeks to wait :param int days: number of days to wait :param int hours: number of hours to wait :param int minutes: number of minutes to wait :param int seconds: number of seconds to wait :param datetime|str start_date: starting point for the interval calculation :param datetime|str end_date: latest possible date/time to trigger on :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most. 
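Illustrative examples (editorial addition; values are arbitrary)::

        IntervalTrigger(hours=2)    # every 2 hours, first fire 2 hours from now
        IntervalTrigger(minutes=30, start_date='2019-11-01 09:00:00')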
""" __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter' def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, end_date=None, timezone=None, jitter=None): self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds) self.interval_length = timedelta_seconds(self.interval) if self.interval_length == 0: self.interval = timedelta(seconds=1) self.interval_length = 1 if timezone: self.timezone = astimezone(timezone) elif isinstance(start_date, datetime) and start_date.tzinfo: self.timezone = start_date.tzinfo elif isinstance(end_date, datetime) and end_date.tzinfo: self.timezone = end_date.tzinfo else: self.timezone = get_localzone() start_date = start_date or (datetime.now(self.timezone) + self.interval) self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date') self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date') self.jitter = jitter def get_next_fire_time(self, previous_fire_time, now): if previous_fire_time: next_fire_time = previous_fire_time + self.interval elif self.start_date > now: next_fire_time = self.start_date else: timediff_seconds = timedelta_seconds(now - self.start_date) next_interval_num = int(ceil(timediff_seconds / self.interval_length)) next_fire_time = self.start_date + self.interval * next_interval_num if self.jitter is not None: next_fire_time = self._apply_jitter(next_fire_time, self.jitter, now) if not self.end_date or next_fire_time <= self.end_date: return self.timezone.normalize(next_fire_time) def __getstate__(self): return { 'version': 2, 'timezone': self.timezone, 'start_date': self.start_date, 'end_date': self.end_date, 'interval': self.interval, 'jitter': self.jitter, } def __setstate__(self, state): # This is for compatibility with APScheduler 3.0.x if isinstance(state, tuple): state = state[1] if state.get('version', 1) > 2: raise ValueError( 'Got serialized data for version %s of %s, but only versions up to 2 can be ' 'handled' % (state['version'], self.__class__.__name__)) self.timezone = state['timezone'] self.start_date = state['start_date'] self.end_date = state['end_date'] self.interval = state['interval'] self.interval_length = timedelta_seconds(self.interval) self.jitter = state.get('jitter') def __str__(self): return 'interval[%s]' % str(self.interval) def __repr__(self): options = ['interval=%r' % self.interval, 'start_date=%r' % datetime_repr(self.start_date)] if self.end_date: options.append("end_date=%r" % datetime_repr(self.end_date)) if self.jitter: options.append('jitter=%s' % self.jitter) return "<%s (%s, timezone='%s')>" % ( self.__class__.__name__, ', '.join(options), self.timezone) apscheduler-3.6.3/apscheduler/util.py000066400000000000000000000332131356022153700176560ustar00rootroot00000000000000"""This module contains several handy functions primarily meant for internal use.""" from __future__ import division from datetime import date, datetime, time, timedelta, tzinfo from calendar import timegm from functools import partial from inspect import isclass, ismethod import re from pytz import timezone, utc, FixedOffset import six try: from inspect import signature except ImportError: # pragma: nocover from funcsigs import signature try: from threading import TIMEOUT_MAX except ImportError: TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows try: from asyncio import iscoroutinefunction except ImportError: try: from trollius import iscoroutinefunction except ImportError: def 
iscoroutinefunction(func): return False __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp', 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name', 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args', 'TIMEOUT_MAX') class _Undefined(object): def __nonzero__(self): return False def __bool__(self): return False def __repr__(self): return '<undefined>' undefined = _Undefined() #: a unique object that only signifies that no value is defined def asint(text): """ Safely converts a string to an integer, returning ``None`` if the string is ``None``. :type text: str :rtype: int """ if text is not None: return int(text) def asbool(obj): """ Interprets an object as a boolean value. :rtype: bool """ if isinstance(obj, str): obj = obj.strip().lower() if obj in ('true', 'yes', 'on', 'y', 't', '1'): return True if obj in ('false', 'no', 'off', 'n', 'f', '0'): return False raise ValueError('Unable to interpret value "%s" as boolean' % obj) return bool(obj) def astimezone(obj): """ Interprets an object as a timezone. :rtype: tzinfo """ if isinstance(obj, six.string_types): return timezone(obj) if isinstance(obj, tzinfo): if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'): raise TypeError('Only timezones from the pytz library are supported') if obj.zone == 'local': raise ValueError( 'Unable to determine the name of the local timezone -- you must explicitly ' 'specify the name of the local timezone. Please refrain from using timezones like ' 'EST to prevent problems with daylight saving time. Instead, use a locale based ' 'timezone name (such as Europe/Helsinki).') return obj if obj is not None: raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__) _DATE_REGEX = re.compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' r'(?:[ T](?P<hour>\d{1,2}):(?P<minute>\d{1,2}):(?P<second>\d{1,2})' r'(?:\.(?P<microsecond>\d{1,6}))?' r'(?P<timezone>Z|[+-]\d\d:\d\d)?)?$') def convert_to_datetime(input, tz, arg_name): """ Converts the given object to a timezone aware datetime object. If a timezone aware datetime object is passed, it is returned unmodified. If a naive datetime object is passed, it is given the specified timezone. If the input is a string, it is parsed as a datetime with the given timezone. Date strings are accepted in three different forms: date only (Y-m-d), date with time (Y-m-d H:M:S) or date+time with microseconds (Y-m-d H:M:S.micro). Additionally you can override the time zone by giving a specific offset in the format specified by ISO 8601: Z (UTC), +HH:MM or -HH:MM.
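For instance (illustrative values), ``'2019-11-01'``, ``'2019-11-01 14:30:00'`` and ``'2019-11-01T14:30:00.123456+02:00'`` would all be accepted.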
:param str|datetime input: the datetime or string to convert to a timezone aware datetime :param datetime.tzinfo tz: timezone to interpret ``input`` in :param str arg_name: the name of the argument (used in an error message) :rtype: datetime """ if input is None: return elif isinstance(input, datetime): datetime_ = input elif isinstance(input, date): datetime_ = datetime.combine(input, time()) elif isinstance(input, six.string_types): m = _DATE_REGEX.match(input) if not m: raise ValueError('Invalid date string') values = m.groupdict() tzname = values.pop('timezone') if tzname == 'Z': tz = utc elif tzname: hours, minutes = (int(x) for x in tzname[1:].split(':')) sign = 1 if tzname[0] == '+' else -1 tz = FixedOffset(sign * (hours * 60 + minutes)) values = {k: int(v or 0) for k, v in values.items()} datetime_ = datetime(**values) else: raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__)) if datetime_.tzinfo is not None: return datetime_ if tz is None: raise ValueError( 'The "tz" argument must be specified if %s has no timezone information' % arg_name) if isinstance(tz, six.string_types): tz = timezone(tz) try: return tz.localize(datetime_, is_dst=None) except AttributeError: raise TypeError( 'Only pytz timezones are supported (need the localize() and normalize() methods)') def datetime_to_utc_timestamp(timeval): """ Converts a datetime instance to a timestamp. :type timeval: datetime :rtype: float """ if timeval is not None: return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000 def utc_timestamp_to_datetime(timestamp): """ Converts the given timestamp to a datetime instance. :type timestamp: float :rtype: datetime """ if timestamp is not None: return datetime.fromtimestamp(timestamp, utc) def timedelta_seconds(delta): """ Converts the given timedelta to seconds. :type delta: timedelta :rtype: float """ return delta.days * 24 * 60 * 60 + delta.seconds + \ delta.microseconds / 1000000.0 def datetime_ceil(dateval): """ Rounds the given datetime object upwards. :type dateval: datetime """ if dateval.microsecond > 0: return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond) return dateval def datetime_repr(dateval): return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None' def get_callable_name(func): """ Returns the best available display name for the given function/callable. :rtype: str """ # the easy case (on Python 3.3+) if hasattr(func, '__qualname__'): return func.__qualname__ # class methods, bound and unbound methods f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None) if f_self and hasattr(func, '__name__'): f_class = f_self if isclass(f_self) else f_self.__class__ else: f_class = getattr(func, 'im_class', None) if f_class and hasattr(func, '__name__'): return '%s.%s' % (f_class.__name__, func.__name__) # class or class instance if hasattr(func, '__call__'): # class if hasattr(func, '__name__'): return func.__name__ # instance of a class with a __call__ method return func.__class__.__name__ raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func) def obj_to_ref(obj): """ Returns the path to the given callable. 
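For example (an illustrative call), ``obj_to_ref(json.dumps)`` returns ``'json:dumps'``.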
:rtype: str :raises TypeError: if the given object is not callable :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested function """ if isinstance(obj, partial): raise ValueError('Cannot create a reference to a partial()') name = get_callable_name(obj) if '<lambda>' in name: raise ValueError('Cannot create a reference to a lambda') if '<locals>' in name: raise ValueError('Cannot create a reference to a nested function') if ismethod(obj): if hasattr(obj, 'im_self') and obj.im_self: # bound method module = obj.im_self.__module__ elif hasattr(obj, 'im_class') and obj.im_class: # unbound method module = obj.im_class.__module__ else: module = obj.__module__ else: module = obj.__module__ return '%s:%s' % (module, name) def ref_to_obj(ref): """ Returns the object pointed to by ``ref``. :type ref: str """ if not isinstance(ref, six.string_types): raise TypeError('References must be strings') if ':' not in ref: raise ValueError('Invalid reference') modulename, rest = ref.split(':', 1) try: obj = __import__(modulename, fromlist=[rest]) except ImportError: raise LookupError('Error resolving reference %s: could not import module' % ref) try: for name in rest.split('.'): obj = getattr(obj, name) return obj except Exception: raise LookupError('Error resolving reference %s: error looking up object' % ref) def maybe_ref(ref): """ Returns the object that the given reference points to, if it is indeed a reference. If it is not a reference, the object is returned as-is. """ if not isinstance(ref, str): return ref return ref_to_obj(ref) if six.PY2: def repr_escape(string): if isinstance(string, six.text_type): return string.encode('ascii', 'backslashreplace') return string else: def repr_escape(string): return string def check_callable_args(func, args, kwargs): """ Ensures that the given callable can be called with the given arguments.
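For example (an illustrative call), ``check_callable_args(lambda a, b=1: None, (2,), {'c': 3})`` raises a ``ValueError``, since ``c`` matches no parameter of the lambda.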
:type args: tuple :type kwargs: dict """ pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs positional_only_kwargs = [] # positional-only parameters that have a match in kwargs unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs unmatched_args = list(args) # args that didn't match any of the parameters in the signature # kwargs that didn't match any of the parameters in the signature unmatched_kwargs = list(kwargs) # indicates if the signature defines *args and **kwargs respectively has_varargs = has_var_kwargs = False try: sig = signature(func) except ValueError: # signature() doesn't work against every kind of callable return for param in six.itervalues(sig.parameters): if param.kind == param.POSITIONAL_OR_KEYWORD: if param.name in unmatched_kwargs and unmatched_args: pos_kwargs_conflicts.append(param.name) elif unmatched_args: del unmatched_args[0] elif param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) elif param.default is param.empty: unsatisfied_args.append(param.name) elif param.kind == param.POSITIONAL_ONLY: if unmatched_args: del unmatched_args[0] elif param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) positional_only_kwargs.append(param.name) elif param.default is param.empty: unsatisfied_args.append(param.name) elif param.kind == param.KEYWORD_ONLY: if param.name in unmatched_kwargs: unmatched_kwargs.remove(param.name) elif param.default is param.empty: unsatisfied_kwargs.append(param.name) elif param.kind == param.VAR_POSITIONAL: has_varargs = True elif param.kind == param.VAR_KEYWORD: has_var_kwargs = True # Make sure there are no conflicts between args and kwargs if pos_kwargs_conflicts: raise ValueError('The following arguments are supplied in both args and kwargs: %s' % ', '.join(pos_kwargs_conflicts)) # Check if keyword arguments are being fed to positional-only parameters if positional_only_kwargs: raise ValueError('The following arguments cannot be given as keyword arguments: %s' % ', '.join(positional_only_kwargs)) # Check that the number of positional arguments minus the number of matched kwargs matches the # argspec if unsatisfied_args: raise ValueError('The following arguments have not been supplied: %s' % ', '.join(unsatisfied_args)) # Check that all keyword-only arguments have been supplied if unsatisfied_kwargs: raise ValueError( 'The following keyword-only arguments have not been supplied in kwargs: %s' % ', '.join(unsatisfied_kwargs)) # Check that the callable can accept the given number of positional arguments if not has_varargs and unmatched_args: raise ValueError( 'The list of positional arguments is longer than the target callable can handle ' '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args))) # Check that the callable can accept the given keyword arguments if not has_var_kwargs and unmatched_kwargs: raise ValueError( 'The target callable does not accept the following keyword arguments: %s' % ', '.join(unmatched_kwargs)) def iscoroutinefunction_partial(f): while isinstance(f, partial): f = f.func # The asyncio version of iscoroutinefunction includes testing for @coroutine # decorations vs. the inspect version which does not. 
return iscoroutinefunction(f) apscheduler-3.6.3/docker-compose.yml000066400000000000000000000004641356022153700174670ustar00rootroot00000000000000version: "2" services: redis: image: redis ports: - 127.0.0.1:6379:6379 mongodb: image: mongo ports: - 127.0.0.1:27017:27017 rethinkdb: image: rethinkdb ports: - 127.0.0.1:28015:28015 zookeeper: image: zookeeper ports: - 127.0.0.1:2181:2181 apscheduler-3.6.3/docs/000077500000000000000000000000001356022153700147565ustar00rootroot00000000000000apscheduler-3.6.3/docs/conf.py000066400000000000000000000147441356022153700162670ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # APScheduler documentation build configuration file, created by # sphinx-quickstart on Fri Jul 31 02:56:30 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import apscheduler # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'APScheduler' copyright = u'Alex Grönholm' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = apscheduler.version # The full version, including alpha/beta/rc tags. release = apscheduler.release # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build', 'build', '.tox', '.git', 'examples'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] autodoc_member_order = 'alphabetical' # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'APSchedulerdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'APScheduler.tex', u'APScheduler Documentation', u'Alex Grönholm', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
#latex_use_modindex = True intersphinx_mapping = {'python': ('https://docs.python.org/', None), 'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None)} apscheduler-3.6.3/docs/contributing.rst000066400000000000000000000055151356022153700202250ustar00rootroot00000000000000########################### Contributing to APScheduler ########################### If you wish to add a feature or fix a bug in APScheduler, you need to follow certain procedures and rules to get your changes accepted. This is to maintain the high quality of the code base. Contribution Process ==================== 1. Fork the project on Github 2. Clone the fork to your local machine 3. Make the changes to the project 4. Run the test suite with tox (if you changed any code) 5. Repeat steps 3-4 until the test suite passes 6. Commit if you haven't already 7. Push the changes to your Github fork 8. Make a pull request on Github There is no need to update the change log -- this will be done prior to the next release at the latest. Should the test suite fail even before your changes (which should be rare), make sure you're at least not adding to the failures. Development Dependencies ======================== To fully run the test suite, you will need at least: * A MongoDB server * A Redis server * A Zookeeper server For other dependencies, it's best to look in tox.ini and install what is appropriate for the Python version you're using. Code Style ========== This project uses PEP 8 rules with its maximum allowed column limit of 99 characters. This limit applies to all text files (source code, tests, documentation). In particular, remember to group the imports correctly (standard library imports first, third party libs second, project libraries third, conditional imports last). The PEP 8 checker does not check for this. If in doubt, just follow the surrounding code style as closely as possible. Testing ======= Running the test suite is done using the tox_ utility. This will test the code base against all supported Python versions and performs some code quality checks using flake8_ as well. Some tests require the presence of external services (in practice, database servers). To help with that, there is a docker-compose_ configuration included. Running ``docker-compose up -d`` will start all the necessary services for the tests to work. Any nontrivial code changes must be accompanied with the appropriate tests. The tests should not only maintain the coverage, but should test any new functionality or bug fixes reasonably well. If you're fixing a bug, first make sure you have a test which fails against the unpatched codebase and succeeds against the fixed version. Naturally, the test suite has to pass on every Python version. If setting up all the required Python interpreters seems like too much trouble, make sure that it at least passes on the lowest supported versions of both Python 2 and 3. The full test suite is always run against each pull request, but it's a good idea to run the tests locally first. .. _tox: https://tox.readthedocs.io/ .. _flake8: http://flake8.pycqa.org/ .. _docker-compose: https://docs.docker.com/compose/ apscheduler-3.6.3/docs/extending.rst000066400000000000000000000132131356022153700174750ustar00rootroot00000000000000##################### Extending APScheduler ##################### This document is meant to explain how to develop your custom triggers, job stores, executors and schedulers. Custom triggers --------------- The built-in triggers cover the needs of the majority of all users. 
However, some users may need specialized scheduling logic. To that end, the trigger system was made pluggable. To implement your scheduling logic, subclass :class:`~apscheduler.triggers.base.BaseTrigger`. Look at the interface documentation in that class. Then look at the existing trigger implementations. That should give you a good idea of what is expected of a trigger implementation. To use your trigger, you can use :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job` like this:: trigger = MyTrigger(arg1='foo') scheduler.add_job(target, trigger) You can also register it as a plugin so you can use the alternate form of ``add_job``:: scheduler.add_job(target, 'my_trigger', arg1='foo') This is done by adding an entry point in your project's :file:`setup.py`:: ... entry_points={ 'apscheduler.triggers': ['my_trigger = mytoppackage.subpackage:MyTrigger'] } Custom job stores ----------------- If you want to store your jobs in a fancy new NoSQL database, or a totally custom datastore, you can implement your own job store by subclassing :class:`~apscheduler.jobstores.base.BaseJobStore`. A job store typically serializes the :class:`~apscheduler.job.Job` objects given to it, and constructs new Job objects from binary data when they are loaded from the backing store. It is important that the job store restores the ``_scheduler`` and ``_jobstore_alias`` attributes of any Job that it creates. Refer to existing implementations for examples. It should be noted that :class:`~apscheduler.jobstores.memory.MemoryJobStore` is special in that it does not deserialize the jobs. This comes with its own problems, which it handles in its own way. If your job store does serialize jobs, you can of course use a serializer other than pickle. You should, however, use the ``__getstate__`` and ``__setstate__`` special methods to respectively get and set the Job state. Pickle uses them implicitly. To use your job store, you can add it to the scheduler like this:: jobstore = MyJobStore() scheduler.add_jobstore(jobstore, 'mystore') You can also register it as a plugin so you can use the alternate form of ``add_jobstore``:: scheduler.add_jobstore('my_jobstore', 'mystore') This is done by adding an entry point in your project's :file:`setup.py`:: ... entry_points={ 'apscheduler.jobstores': ['my_jobstore = mytoppackage.subpackage:MyJobStore'] } Custom executors ---------------- If you need custom logic for executing your jobs, you can create your own executor classes. One scenario for this would be if you want to use distributed computing to run your jobs on other nodes. Start by subclassing :class:`~apscheduler.executors.base.BaseExecutor`. The responsibilities of an executor are as follows: * Performing any initialization when ``start()`` is called * Releasing any resources when ``shutdown()`` is called * Keeping track of the number of instances of each job running on it, and refusing to run more than the maximum * Notifying the scheduler of the results of the job If your executor needs to serialize the jobs, make sure you either use pickle for it, or invoke the ``__getstate__`` and ``__setstate__`` special methods to respectively get and set the Job state. Pickle uses them implicitly.
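For orientation, a minimal executor might look like the following sketch (an editorial illustration modeled on the bundled debug executor; ``MyExecutor`` is a made-up name, and a real implementation would defer the work to a thread, process or remote node instead of running it inline)::

    import sys

    from apscheduler.executors.base import BaseExecutor, run_job


    class MyExecutor(BaseExecutor):
        """Runs each job directly on the calling thread."""

        def _do_submit_job(self, job, run_times):
            try:
                # run_job() executes the job's callable and collects the resulting events
                events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)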
To use your executor, you can add it to the scheduler like this:: executor = MyExecutor() scheduler.add_executor(executor, 'myexecutor') You can also register it as a plugin so you can use the alternate form of ``add_executor``:: scheduler.add_executor('my_executor', 'myexecutor') This is done by adding an entry point in your project's :file:`setup.py`:: ... entry_points={ 'apscheduler.executors': ['my_executor = mytoppackage.subpackage:MyExecutor'] } Custom schedulers ----------------- A typical situation where you would want to make your own scheduler subclass is when you want to integrate it with your application framework of choice. Your custom scheduler should always be a subclass of :class:`~apscheduler.schedulers.base.BaseScheduler`. But if you're not adapting to a framework that relies on callbacks, consider subclassing :class:`~apscheduler.schedulers.blocking.BlockingScheduler` instead. The most typical extension points for scheduler subclasses are: * :meth:`~apscheduler.schedulers.base.BaseScheduler.start` must be overridden to wake up the scheduler for the first time * :meth:`~apscheduler.schedulers.base.BaseScheduler.shutdown` must be overridden to release resources allocated during ``start()`` * :meth:`~apscheduler.schedulers.base.BaseScheduler.wakeup` must be overridden to manage the timer that notifies the scheduler of changes in the job store * :meth:`~apscheduler.schedulers.base.BaseScheduler._create_lock` override if your framework uses some alternate locking implementation (like gevent) * :meth:`~apscheduler.schedulers.base.BaseScheduler._create_default_executor` override if you need to use an alternative default executor .. important:: Remember to call the superclass implementations of overridden methods, even abstract ones (unless they're empty). The most important responsibility of the scheduler subclass is to manage the scheduler's sleeping based on the return values of ``_process_jobs()``. This can be done in various ways, including setting timeouts in ``wakeup()`` or running a blocking loop in ``start()``. Again, see the existing scheduler classes for examples. apscheduler-3.6.3/docs/faq.rst000066400000000000000000000135421356022153700162640ustar00rootroot00000000000000########################## Frequently Asked Questions ########################## Why doesn't the scheduler run my jobs? ====================================== This could be caused by a number of things. The two most common issues are: #. Running the scheduler inside a uWSGI worker process while threads have not been enabled (see the next section for this) #. Running a :class:`~apscheduler.schedulers.background.BackgroundScheduler` and then letting the execution reach the end of the script To demonstrate the latter case, a script like this will **not work**:: from apscheduler.schedulers.background import BackgroundScheduler def myjob(): print('hello') scheduler = BackgroundScheduler() scheduler.start() scheduler.add_job(myjob, 'cron', hour=0) The script above will **exit** right after calling ``add_job()`` so the scheduler will not have a chance to run the scheduled job. If you're having any other issue, then enabling debug logging as instructed in the :ref:`troubleshooting` section should shed some light on the problem. Why am I getting a ValueError? ============================== If you're receiving an error like the following:: ValueError: This Job cannot be serialized since the reference to its callable (>) could not be determined.
Consider giving a textual reference (module:function name) instead. This means that the function you are attempting to schedule has one of the following problems: * It is a lambda function (e.g. ``lambda x: x + 1``) * It is a bound method (function tied to a particular instance of some class) * It is a nested function (function inside another function) * You are trying to schedule a function that is not tied to any actual module (such as a function defined in the REPL, hence ``__main__`` as the module name) In these cases, it is impossible for the scheduler to determine a "lookup path" to find that specific function instance in situations where, for example, the scheduler process is restarted, or a process pool worker is being sent the related job object. Common workarounds for these problems include: * Converting a lambda to a regular function * Moving a nested function to the module level or to class level as either a class method or a static method * In case of a bound method, passing the unbound version (``YourClass.method_name``) as the target function to ``add_job()`` with the class instance as the first argument (so it gets passed as the ``self`` argument) How can I use APScheduler with uWSGI? ===================================== uWSGI employs some tricks which disable the Global Interpreter Lock and with it, the use of threads which are vital to the operation of APScheduler. To fix this, you need to re-enable the GIL using the ``--enable-threads`` switch. See the `uWSGI documentation `_ for more details. Also, assuming that you will run more than one worker process (as you typically would in production), you should also read the next section. .. _uWSGI-threads: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html#a-note-on-python-threads How do I share a single job store among one or more worker processes? ===================================================================== Short answer: You can't. Long answer: Sharing a persistent job store among two or more processes will lead to incorrect scheduler behavior like duplicate execution or the scheduler missing jobs, etc. This is because APScheduler does not currently have any interprocess synchronization and signalling scheme that would enable the scheduler to be notified when a job has been added, modified or removed from a job store. Workaround: Run the scheduler in a dedicated process and connect to it via some sort of remote access mechanism like RPyC_, gRPC_ or an HTTP server. The source repository contains an example_ of a RPyC based service that is accessed by a client. .. _RPyC: https://rpyc.readthedocs.io/en/latest/ .. _gRPC: https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&uact=8&ved=2ahUKEwj-wMe-1eLcAhXSbZoKHdzGDZsQFjAAegQICRAB&url=https%3A%2F%2Fgrpc.io%2F&usg=AOvVaw0Jt5Y0OKbHd8MdFt9Kc2FO .. _example: https://github.com/agronholm/apscheduler/tree/master/examples/rpc How do I use APScheduler in a web application? ============================================== First read through the previous section. If you're running Django, you may want to check out django_apscheduler_. Note, however, that this is a third party library and APScheduler developers are not responsible for it. Likewise, there is an unofficial extension called Flask-APScheduler_ which may or may not be useful when running APScheduler with Flask. For Pyramid users, the pyramid_scheduler_ library may potentially be helpful. 
Other than that, you pretty much run APScheduler normally, usually using :class:`~apscheduler.schedulers.background.BackgroundScheduler`. If you're running an asynchronous web framework like aiohttp_, you probably want to use a different scheduler in order to take some advantage of the asynchronous nature of the framework. Is there a graphical user interface for APScheduler? ==================================================== No graphical interface is provided by the library itself. However, there are some third party implementations, but APScheduler developers are not responsible for them. Here is a potentially incomplete list: * django_apscheduler_ * apschedulerweb_ * `Nextdoor scheduler`_ .. _django_apscheduler: https://pypi.org/project/django-apscheduler/ .. _Flask-APScheduler: https://pypi.org/project/flask-apscheduler/ .. _pyramid_scheduler: https://github.com/cadithealth/pyramid_scheduler .. _aiohttp: https://pypi.org/project/aiohttp/ .. _apschedulerweb: https://github.com/marwinxxii/apschedulerweb .. _Nextdoor scheduler: https://github.com/Nextdoor/ndschedulerapscheduler-3.6.3/docs/index.rst000066400000000000000000000004121356022153700166140ustar00rootroot00000000000000Advanced Python Scheduler ========================= .. include:: ../README.rst :end-before: Documentation Table of Contents ================= .. toctree:: :maxdepth: 1 userguide versionhistory migration contributing extending faq py-modindex apscheduler-3.6.3/docs/migration.rst000066400000000000000000000120701356022153700175010ustar00rootroot00000000000000############################################### Migrating from previous versions of APScheduler ############################################### From v3.0 to v3.2 ================= Prior to v3.1, the scheduler inadvertently exposed the ability to fetch and manipulate jobs before the scheduler had been started. The scheduler now requires you to call ``scheduler.start()`` before attempting to access any of the jobs in the job stores. To ensure that no old jobs are mistakenly executed, you can start the scheduler in paused mode (``scheduler.start(paused=True)``) (introduced in v3.2) to avoid any premature job processing. From v2.x to v3.0 ================= The 3.0 series is API incompatible with previous releases due to a design overhaul. Scheduler changes ----------------- * The concept of "standalone mode" is gone. For ``standalone=True``, use :class:`~apscheduler.schedulers.blocking.BlockingScheduler` instead, and for ``standalone=False``, use :class:`~apscheduler.schedulers.background.BackgroundScheduler`. BackgroundScheduler matches the old default semantics. * Job defaults (like ``misfire_grace_time`` and ``coalesce``) must now be passed in a dictionary as the ``job_defaults`` option to :meth:`~apscheduler.schedulers.base.BaseScheduler.configure`. When supplying an ini-style configuration as the first argument, they will need a corresponding ``job_defaults.`` prefix. * The configuration key prefix for job stores was changed from ``jobstore.`` to ``jobstores.`` to match the dict-style configuration better. * The ``max_runs`` option has been dropped since the run counter could not be reliably preserved when replacing a job with another one with the same ID. To make up for this, the ``end_date`` option was added to cron and interval triggers. * The old thread pool is gone, replaced by ``ThreadPoolExecutor``. This means that the old ``threadpool`` options are no longer valid. See :ref:`scheduler-config` on how to configure executors. 
* The trigger-specific scheduling methods have been removed entirely from the scheduler. Use the generic :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job` method or the :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator instead. The signatures of these methods were changed significantly. * The ``shutdown_threadpool`` and ``close_jobstores`` options have been removed from the :meth:`~apscheduler.schedulers.base.BaseScheduler.shutdown` method. Executors and job stores are now always shut down on scheduler shutdown. * :meth:`~apscheduler.scheduler.Scheduler.unschedule_job` and :meth:`~apscheduler.scheduler.Scheduler.unschedule_func` have been replaced by :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`. You can also unschedule a job by using the job handle returned from :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job`. Job store changes ----------------- The job store system was completely overhauled for both efficiency and forwards compatibility. Unfortunately, this means that the old data is not compatible with the new job stores. If you need to migrate existing data from APScheduler 2.x to 3.x, contact the APScheduler author. The Shelve job store had to be dropped because it could not support the new job store design. Use SQLAlchemyJobStore with SQLite instead. Trigger changes --------------- From 3.0 onwards, triggers now require a pytz timezone. This is normally provided by the scheduler, but if you were instantiating triggers manually before, then one must be supplied as the ``timezone`` argument. The only other backwards incompatible change was that ``get_next_fire_time()`` takes two arguments now: the previous fire time and the current datetime. From v1.x to 2.0 ================ There have been some API changes since the 1.x series. This document explains the changes made to v2.0 that are incompatible with the v1.x API. API changes ----------- * The behavior of cron scheduling with regards to default values for omitted fields has been made more intuitive -- omitted fields lower than the least significant explicitly defined field will default to their minimum values except for the week number and weekday fields * SchedulerShutdownError has been removed -- jobs are now added tentatively and scheduled for real when/if the scheduler is restarted * Scheduler.is_job_active() has been removed -- use ``job in scheduler.get_jobs()`` instead * dump_jobs() is now print_jobs() and prints directly to the given file or sys.stdout if none is given * The ``repeat`` parameter was removed from :meth:`~apscheduler.scheduler.Scheduler.add_interval_job` and :meth:`~apscheduler.scheduler.Scheduler.interval_schedule` in favor of the universal ``max_runs`` option * :meth:`~apscheduler.scheduler.Scheduler.unschedule_func` now raises a KeyError if the given function is not scheduled * The semantics of :meth:`~apscheduler.scheduler.Scheduler.shutdown` have changed -- the method no longer accepts a numeric argument, but two booleans Configuration changes --------------------- * The scheduler can no longer be reconfigured while it's running apscheduler-3.6.3/docs/modules/000077500000000000000000000000001356022153700164265ustar00rootroot00000000000000apscheduler-3.6.3/docs/modules/events.rst000066400000000000000000000047101356022153700204660ustar00rootroot00000000000000:mod:`apscheduler.events` ============================ .. automodule:: apscheduler.events API --- .. autoclass:: SchedulerEvent :members: .. autoclass:: JobEvent :members: :show-inheritance: .. 
autoclass:: JobSubmissionEvent :members: :show-inheritance: .. autoclass:: JobExecutionEvent :members: :show-inheritance: Event codes ----------- The following event codes are numeric constants importable from :mod:`apscheduler.events`. .. list-table:: :header-rows: 1 * - Constant - Description - Event class * - EVENT_SCHEDULER_STARTED - The scheduler was started - :class:`SchedulerEvent` * - EVENT_SCHEDULER_SHUTDOWN - The scheduler was shut down - :class:`SchedulerEvent` * - EVENT_SCHEDULER_PAUSED - Job processing in the scheduler was paused - :class:`SchedulerEvent` * - EVENT_SCHEDULER_RESUMED - Job processing in the scheduler was resumed - :class:`SchedulerEvent` * - EVENT_EXECUTOR_ADDED - An executor was added to the scheduler - :class:`SchedulerEvent` * - EVENT_EXECUTOR_REMOVED - An executor was removed from the scheduler - :class:`SchedulerEvent` * - EVENT_JOBSTORE_ADDED - A job store was added to the scheduler - :class:`SchedulerEvent` * - EVENT_JOBSTORE_REMOVED - A job store was removed from the scheduler - :class:`SchedulerEvent` * - EVENT_ALL_JOBS_REMOVED - All jobs were removed from either all job stores or one particular job store - :class:`SchedulerEvent` * - EVENT_JOB_ADDED - A job was added to a job store - :class:`JobEvent` * - EVENT_JOB_REMOVED - A job was removed from a job store - :class:`JobEvent` * - EVENT_JOB_MODIFIED - A job was modified from outside the scheduler - :class:`JobEvent` * - EVENT_JOB_SUBMITTED - A job was submitted to its executor to be run - :class:`JobSubmissionEvent` * - EVENT_JOB_MAX_INSTANCES - A job being submitted to its executor was not accepted by the executor because the job has already reached its maximum concurrently executing instances - :class:`JobSubmissionEvent` * - EVENT_JOB_EXECUTED - A job was executed successfully - :class:`JobExecutionEvent` * - EVENT_JOB_ERROR - A job raised an exception during execution - :class:`JobExecutionEvent` * - EVENT_JOB_MISSED - A job's execution was missed - :class:`JobExecutionEvent` * - EVENT_ALL - A catch-all mask that includes every event type - N/A
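As a minimal usage sketch (assuming an already configured and started ``scheduler`` object), a listener mask is built by OR'ing the constants above together::

    from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED

    def job_status_listener(event):
        # Both constants map to JobExecutionEvent, which carries the job's ID
        # and the raised exception, if any
        if event.exception:
            print('Job %s raised %r' % (event.job_id, event.exception))
        else:
            print('Job %s finished successfully' % (event.job_id,))

    scheduler.add_listener(job_status_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)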
apscheduler-3.6.3/docs/modules/executors/000077500000000000000000000000001356022153700204475ustar00rootroot00000000000000apscheduler-3.6.3/docs/modules/executors/asyncio.rst000066400000000000000000000003101356022153700226400ustar00rootroot00000000000000:mod:`apscheduler.executors.asyncio` ==================================== .. automodule:: apscheduler.executors.asyncio Module Contents --------------- .. autoclass:: AsyncIOExecutor :members: apscheduler-3.6.3/docs/modules/executors/base.rst000066400000000000000000000002751356022153700221170ustar00rootroot00000000000000:mod:`apscheduler.executors.base` ================================= .. automodule:: apscheduler.executors.base Module Contents --------------- .. autoclass:: BaseExecutor :members: apscheduler-3.6.3/docs/modules/executors/debug.rst000066400000000000000000000003001356022153700222600ustar00rootroot00000000000000:mod:`apscheduler.executors.debug` ================================== .. automodule:: apscheduler.executors.debug Module Contents --------------- .. autoclass:: DebugExecutor :members: apscheduler-3.6.3/docs/modules/executors/gevent.rst000066400000000000000000000003041356022153700224660ustar00rootroot00000000000000:mod:`apscheduler.executors.gevent` =================================== .. automodule:: apscheduler.executors.gevent Module Contents --------------- .. autoclass:: GeventExecutor :members: apscheduler-3.6.3/docs/modules/executors/pool.rst000066400000000000000000000003641356022153700221550ustar00rootroot00000000000000:mod:`apscheduler.executors.pool` ================================= .. automodule:: apscheduler.executors.pool Module Contents --------------- .. autoclass:: ThreadPoolExecutor :members: .. autoclass:: ProcessPoolExecutor :members: apscheduler-3.6.3/docs/modules/executors/twisted.rst000066400000000000000000000003101356022153700226600ustar00rootroot00000000000000:mod:`apscheduler.executors.twisted` ==================================== .. automodule:: apscheduler.executors.twisted Module Contents --------------- .. autoclass:: TwistedExecutor :members: apscheduler-3.6.3/docs/modules/job.rst000066400000000000000000000001721356022153700177320ustar00rootroot00000000000000:mod:`apscheduler.job` ====================== .. automodule:: apscheduler.job API --- .. autoclass:: Job :members: apscheduler-3.6.3/docs/modules/jobstores/000077500000000000000000000000001356022153700204405ustar00rootroot00000000000000apscheduler-3.6.3/docs/modules/jobstores/base.rst000066400000000000000000000002441356022153700221040ustar00rootroot00000000000000:mod:`apscheduler.jobstores.base` ================================= .. automodule:: apscheduler.jobstores.base API --- .. autoclass:: BaseJobStore :members: apscheduler-3.6.3/docs/modules/jobstores/memory.rst000066400000000000000000000014431356022153700225040ustar00rootroot00000000000000:mod:`apscheduler.jobstores.memory` =================================== .. automodule:: apscheduler.jobstores.memory API --- .. autoclass:: MemoryJobStore :show-inheritance: Introduction ------------ MemoryJobStore stores jobs in memory as-is, without serializing them. This allows you to schedule callables that are not globally reachable and to use non-serializable job arguments. .. list-table:: :widths: 1 4 * - External dependencies - none * - Example - ``examples/schedulers/blocking.py`` (`view online `_). .. caution:: Unlike with other job stores, changes made to any mutable job arguments persist across job invocations. You can use this to your advantage, however. apscheduler-3.6.3/docs/modules/jobstores/mongodb.rst000066400000000000000000000012661356022153700226240ustar00rootroot00000000000000:mod:`apscheduler.jobstores.mongodb` ==================================== .. automodule:: apscheduler.jobstores.mongodb API --- .. autoclass:: MongoDBJobStore(database='apscheduler', collection='jobs', client=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args) :show-inheritance: Introduction ------------ MongoDBJobStore stores jobs in a `MongoDB `_ database. .. list-table:: :widths: 1 4 * - External dependencies - `pymongo `_ * - Example - ``examples/jobstores/mongodb.py`` (`view online `_). apscheduler-3.6.3/docs/modules/jobstores/redis.rst000066400000000000000000000012571356022153700223050ustar00rootroot00000000000000:mod:`apscheduler.jobstores.redis` ================================== .. automodule:: apscheduler.jobstores.redis API --- .. autoclass:: RedisJobStore(db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times', pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args) :show-inheritance: Introduction ------------ RedisJobStore stores jobs in a `redis `_ database. .. list-table:: :widths: 1 4 * - External dependencies - `redis `_ * - Example - ``examples/jobstores/redis_.py`` (`view online `_).
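A minimal configuration sketch (assuming a Redis server reachable with the default connection settings; ``tick`` is a placeholder callable, and the key names mirror the bundled ``examples/jobstores/redis_.py``)::

    from apscheduler.schedulers.blocking import BlockingScheduler

    def tick():
        print('Tick!')

    scheduler = BlockingScheduler()
    # 'redis' is the job store's plugin alias; keyword arguments are passed on to RedisJobStore
    scheduler.add_jobstore('redis', jobs_key='example.jobs', run_times_key='example.run_times')
    scheduler.add_job(tick, 'interval', seconds=10, id='tick', replace_existing=True)
    scheduler.start()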
apscheduler-3.6.3/docs/modules/jobstores/rethinkdb.rst000066400000000000000000000013111356022153700231400ustar00rootroot00000000000000:mod:`apscheduler.jobstores.rethinkdb` ====================================== .. automodule:: apscheduler.jobstores.rethinkdb API --- .. autoclass:: RethinkDBJobStore(database='apscheduler', table='jobs', client=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args) :show-inheritance: Introduction ------------ RethinkDBJobStore stores jobs in a `RethinkDB `_ database. .. list-table:: :widths: 1 4 * - External dependencies - `rethinkdb `_ * - Example - ``examples/jobstores/rethinkdb_.py`` (`view online `_). apscheduler-3.6.3/docs/modules/jobstores/sqlalchemy.rst000066400000000000000000000017201356022153700233340ustar00rootroot00000000000000:mod:`apscheduler.jobstores.sqlalchemy` ======================================= .. automodule:: apscheduler.jobstores.sqlalchemy API --- .. autoclass:: SQLAlchemyJobStore(url=None, engine=None, tablename='apscheduler_jobs', metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL) :show-inheritance: Introduction ------------ SQLAlchemyJobStore stores jobs in any relational database management system supported by `SQLAlchemy `_. It can use either a preconfigured `Engine `_ or you can pass it a connection URL. .. list-table:: :widths: 1 4 * - External dependencies - `SQLAlchemy `_ (+ the backend specific driver package) * - Example - ``examples/jobstores/sqlalchemy_.py`` (`view online `_). apscheduler-3.6.3/docs/modules/jobstores/zookeeper.rst000066400000000000000000000013311356022153700231730ustar00rootroot00000000000000:mod:`apscheduler.jobstores.zookeeper` ====================================== .. automodule:: apscheduler.jobstores.zookeeper API --- .. autoclass:: ZooKeeperJobStore(path='/apscheduler', client=None, close_connection_on_exit=False, pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args) :show-inheritance: Introduction ------------ ZooKeeperJobStore stores jobs in an `Apache ZooKeeper `_ instance. .. list-table:: :widths: 1 4 * - External dependencies - `kazoo `_ * - Example - ``examples/jobstores/zookeeper.py`` (`view online `_). apscheduler-3.6.3/docs/modules/schedulers.rst000066400000000000000000000001611356022153700213170ustar00rootroot00000000000000:mod:`apscheduler.schedulers` ============================= .. automodule:: apscheduler.schedulers :members: apscheduler-3.6.3/docs/modules/schedulers/000077500000000000000000000000001356022153700205675ustar00rootroot00000000000000apscheduler-3.6.3/docs/modules/schedulers/asyncio.rst000066400000000000000000000017461356022153700227760ustar00rootroot00000000000000:mod:`apscheduler.schedulers.asyncio` ===================================== .. automodule:: apscheduler.schedulers.asyncio API --- .. autoclass:: AsyncIOScheduler :show-inheritance: Introduction ------------ AsyncIOScheduler was meant to be used with the `AsyncIO `_ event loop. By default, it will run jobs in the event loop's thread pool. If you have an application that runs on an AsyncIO event loop, you will want to use this scheduler. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.asyncio.AsyncIOExecutor` * - External dependencies - * Python >= 3.4: none * Python 3.3: `asyncio `_ * Python <= 3.2: `trollius `_ * - Example - ``examples/schedulers/asyncio_.py`` (`view online `_). 
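A minimal usage sketch along the lines of the bundled example referenced above (``tick`` is a placeholder callable)::

    import asyncio

    from apscheduler.schedulers.asyncio import AsyncIOScheduler

    def tick():
        print('Tick!')

    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()

    # Jobs are only processed while the event loop is running
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass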
apscheduler-3.6.3/docs/modules/schedulers/background.rst000066400000000000000000000014241356022153700234410ustar00rootroot00000000000000:mod:`apscheduler.schedulers.background` ======================================== .. automodule:: apscheduler.schedulers.background API --- .. autoclass:: BackgroundScheduler :show-inheritance: Introduction ------------ BackgroundScheduler runs in a thread **inside** your existing application. Calling :meth:`~apscheduler.schedulers.background.BackgroundScheduler.start` will start the scheduler and it will continue running after the call returns. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.pool.ThreadPoolExecutor` * - External dependencies - none * - Example - ``examples/schedulers/background.py`` (`view online `_). apscheduler-3.6.3/docs/modules/schedulers/base.rst000066400000000000000000000002351356022153700222330ustar00rootroot00000000000000:mod:`apscheduler.schedulers.base` ================================== .. automodule:: apscheduler.schedulers.base :members: :member-order: bysource apscheduler-3.6.3/docs/modules/schedulers/blocking.rst000066400000000000000000000015241356022153700231130ustar00rootroot00000000000000:mod:`apscheduler.schedulers.blocking` ====================================== .. automodule:: apscheduler.schedulers.blocking API --- .. autoclass:: BlockingScheduler :show-inheritance: Introduction ------------ BlockingScheduler is the simplest possible scheduler. It runs in the foreground, so when you call :meth:`~apscheduler.schedulers.blocking.BlockingScheduler.start`, the call never returns. BlockingScheduler can be useful if you want to use APScheduler as a standalone scheduler (e.g. to build a daemon). .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.pool.ThreadPoolExecutor` * - External dependencies - none * - Example - ``examples/schedulers/blocking.py`` (`view online `_). apscheduler-3.6.3/docs/modules/schedulers/gevent.rst000066400000000000000000000014451356022153700226150ustar00rootroot00000000000000:mod:`apscheduler.schedulers.gevent` ==================================== .. automodule:: apscheduler.schedulers.gevent API --- .. autoclass:: GeventScheduler :show-inheritance: Introduction ------------ GeventScheduler was meant to be used with applications that use `gevent `_. GeventScheduler uses gevent natively, so it doesn't require monkey patching. By default it executes jobs as greenlets. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.gevent.GeventExecutor` * - External dependencies - `gevent `_ * - Example - ``examples/schedulers/gevent_.py`` (`view online `_). apscheduler-3.6.3/docs/modules/schedulers/qt.rst000066400000000000000000000013011356022153700217400ustar00rootroot00000000000000:mod:`apscheduler.schedulers.qt` ================================ .. automodule:: apscheduler.schedulers.qt API --- .. autoclass:: QtScheduler :show-inheritance: Introduction ------------ QtScheduler lets you integrate APScheduler with your `PySide `_ or `PyQt `_ application. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.pool.ThreadPoolExecutor` * - External dependencies - PySide or PyQt * - Example - ``examples/schedulers/qt.py`` (`view online `_). apscheduler-3.6.3/docs/modules/schedulers/tornado.rst000066400000000000000000000012671356022153700227750ustar00rootroot00000000000000:mod:`apscheduler.schedulers.tornado` ===================================== .. automodule:: apscheduler.schedulers.tornado API --- .. 
autoclass:: TornadoScheduler :show-inheritance: Introduction ------------ TornadoScheduler was meant to be used in `Tornado `_ applications. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.pool.ThreadPoolExecutor` * - External dependencies - `tornado `_ * - Example - ``examples/schedulers/tornado_.py`` (`view online `_) apscheduler-3.6.3/docs/modules/schedulers/twisted.rst000066400000000000000000000013661356022153700230100ustar00rootroot00000000000000:mod:`apscheduler.schedulers.twisted` ===================================== .. automodule:: apscheduler.schedulers.twisted API --- .. autoclass:: TwistedScheduler :show-inheritance: Introduction ------------ TwistedScheduler was meant to be used in `Twisted `_ applications. By default it uses the reactor's thread pool to execute jobs. .. list-table:: :widths: 1 4 * - Default executor - :class:`~apscheduler.executors.twisted.TwistedExecutor` * - External dependencies - `twisted `_ * - Example - ``examples/schedulers/twisted_.py`` (`view online `_). apscheduler-3.6.3/docs/modules/triggers/000077500000000000000000000000001356022153700202545ustar00rootroot00000000000000apscheduler-3.6.3/docs/modules/triggers/base.rst000066400000000000000000000002401356022153700217140ustar00rootroot00000000000000:mod:`apscheduler.triggers.base` ================================ .. automodule:: apscheduler.triggers.base API --- .. autoclass:: BaseTrigger :members: apscheduler-3.6.3/docs/modules/triggers/combining.rst000066400000000000000000000020021356022153700227500ustar00rootroot00000000000000:mod:`apscheduler.triggers.combining` ===================================== These triggers combine the behavior of other triggers in different ways to produce schedules more complex than would be possible with any single built-in trigger. .. automodule:: apscheduler.triggers.combining API --- .. autoclass:: AndTrigger .. autoclass:: OrTrigger Examples -------- Run ``job_function`` every 2 hours, but only on Saturdays and Sundays:: from apscheduler.triggers.combining import AndTrigger, OrTrigger from apscheduler.triggers.interval import IntervalTrigger from apscheduler.triggers.cron import CronTrigger trigger = AndTrigger([IntervalTrigger(hours=2), CronTrigger(day_of_week='sat,sun')]) scheduler.add_job(job_function, trigger) Run ``job_function`` every Monday at 2 am and every Tuesday at 3 am:: trigger = OrTrigger([CronTrigger(day_of_week='mon', hour=2), CronTrigger(day_of_week='tue', hour=3)]) scheduler.add_job(job_function, trigger) apscheduler-3.6.3/docs/modules/triggers/cron.rst000066400000000000000000000132331356022153700217510ustar00rootroot00000000000000:mod:`apscheduler.triggers.cron` ================================ .. automodule:: apscheduler.triggers.cron API --- Trigger alias for :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job`: ``cron`` .. autoclass:: CronTrigger :show-inheritance: Introduction ------------ This is the most powerful of the built-in triggers in APScheduler. You can specify a variety of different expressions on each field, and when determining the next execution time, it finds the earliest possible time that satisfies the conditions in every field. This behavior resembles the "Cron" utility found in most UNIX-like operating systems. You can also specify the starting and ending dates for the cron-style schedule through the ``start_date`` and ``end_date`` parameters, respectively. They can be given as a date/datetime object or text (in the `ISO 8601 `_ format). 
Unlike with crontab expressions, you can omit fields that you don't need. Fields greater than the least significant explicitly defined field default to ``*`` while lesser fields default to their minimum values except for ``week`` and ``day_of_week`` which default to ``*``. For example, ``day=1, minute=20`` is equivalent to ``year='*', month='*', day=1, week='*', day_of_week='*', hour='*', minute=20, second=0``. The job will then execute on the first day of every month, every year, at 20 minutes past every hour. The code examples below should further illustrate this behavior. .. note:: The behavior for omitted fields was changed in APScheduler 2.0. Omitted fields previously always defaulted to ``*``. Expression types ---------------- The following table lists all the available expressions for use in the fields from year to second. Multiple expressions can be given in a single field, separated by commas. ============== ===== ======================================================================================= Expression Field Description ============== ===== ======================================================================================= ``*`` any Fire on every value ``*/a`` any Fire every ``a`` values, starting from the minimum ``a-b`` any Fire on any value within the ``a-b`` range (a must be smaller than b) ``a-b/c`` any Fire every ``c`` values within the ``a-b`` range ``xth y`` day Fire on the ``x`` -th occurrence of weekday ``y`` within the month ``last x`` day Fire on the last occurrence of weekday ``x`` within the month ``last`` day Fire on the last day within the month ``x,y,z`` any Fire on any matching expression; can combine any number of any of the above expressions ============== ===== ======================================================================================= .. note:: The ``month`` and ``day_of_week`` fields accept abbreviated English month and weekday names (``jan`` – ``dec`` and ``mon`` – ``sun``) respectively. Daylight saving time behavior ----------------------------- The cron trigger works with the so-called "wall clock" time. Thus, if the selected time zone observes DST (daylight saving time), you should be aware that it may cause unexpected behavior with the cron trigger when entering or leaving DST. When switching from standard time to daylight saving time, clocks are moved either one hour or half an hour forward, depending on the time zone. Likewise, when switching back to standard time, clocks are moved one hour or half an hour backward. This will cause some time periods to either not exist at all, or be repeated. If your schedule would have the job executed on either one of these periods, it may execute more often or less often than expected. This is not a bug. If you wish to avoid this, either use a time zone that does not observe DST (UTC, for instance), or find out the DST switch times in advance and avoid them in your scheduling. For example, the following schedule may be problematic:: # In the Europe/Helsinki timezone, this will not execute at all on the last Sunday morning of March # Likewise, it will execute twice on the last Sunday morning of October sched.add_job(job_function, 'cron', hour=3, minute=30)
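One way to apply the first suggestion is to pin the trigger to a zone without DST; the ``timezone`` option shown here is passed through to the trigger (a sketch reusing the ``sched`` and ``job_function`` names from the examples)::

    # UTC does not observe DST, so this fires exactly once per day, every day
    sched.add_job(job_function, 'cron', hour=3, minute=30, timezone='UTC')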
Examples -------- :: from apscheduler.schedulers.blocking import BlockingScheduler def job_function(): print("Hello World") sched = BlockingScheduler() # Schedules job_function to be run on the third Friday # of June, July, August, November and December at 00:00, 01:00, 02:00 and 03:00 sched.add_job(job_function, 'cron', month='6-8,11-12', day='3rd fri', hour='0-3') sched.start() You can use ``start_date`` and ``end_date`` to limit the total time in which the schedule runs:: # Runs from Monday to Friday at 5:30 (am) until 2014-05-30 00:00:00 sched.add_job(job_function, 'cron', day_of_week='mon-fri', hour=5, minute=30, end_date='2014-05-30') The :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator works nicely too:: @sched.scheduled_job('cron', id='my_job_id', day='last sun') def some_decorated_task(): print("I am printed at 00:00:00 on the last Sunday of every month!") To schedule a job using a standard crontab expression:: from apscheduler.triggers.cron import CronTrigger sched.add_job(job_function, CronTrigger.from_crontab('0 0 1-15 may-aug *')) The ``jitter`` option enables you to add a random component to the execution time. This might be useful if you have multiple servers and don't want them to run a job at the exact same moment or if you want to prevent jobs from running exactly on the hour:: # Run job_function every hour on the hour, with an extra delay picked randomly from a [-120, +120] second window sched.add_job(job_function, 'cron', hour='*', jitter=120) apscheduler-3.6.3/docs/modules/triggers/date.rst000066400000000000000000000026471356022153700217300ustar00rootroot00000000000000:mod:`apscheduler.triggers.date` ================================ .. automodule:: apscheduler.triggers.date API --- Trigger alias for :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job`: ``date`` .. autoclass:: DateTrigger :show-inheritance: Introduction ------------ This is the simplest possible method of scheduling a job. It schedules a job to be executed once at the specified time. It is APScheduler's equivalent to the UNIX "at" command. The ``run_date`` can be given either as a date/datetime object or text (in the `ISO 8601 `_ format). Examples -------- :: from datetime import date, datetime from apscheduler.schedulers.blocking import BlockingScheduler sched = BlockingScheduler() def my_job(text): print(text) # The job will be executed on November 6th, 2009 sched.add_job(my_job, 'date', run_date=date(2009, 11, 6), args=['text']) sched.start() You can specify the exact time when the job should be run:: # The job will be executed on November 6th, 2009 at 16:30:05 sched.add_job(my_job, 'date', run_date=datetime(2009, 11, 6, 16, 30, 5), args=['text']) The run date can be given as text too:: sched.add_job(my_job, 'date', run_date='2009-11-06 16:30:05', args=['text']) To add a job to be run immediately:: # The 'date' trigger and datetime.now() as run_date are implicit sched.add_job(my_job, args=['text']) apscheduler-3.6.3/docs/modules/triggers/interval.rst000066400000000000000000000042531356022153700226360ustar00rootroot00000000000000:mod:`apscheduler.triggers.interval` ==================================== .. automodule:: apscheduler.triggers.interval API --- Trigger alias for :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job`: ``interval`` .. 
autoclass:: IntervalTrigger :show-inheritance: Introduction ------------ This method schedules jobs to be run periodically, on selected intervals. You can also specify the starting and ending dates for the schedule through the ``start_date`` and ``end_date`` parameters, respectively. They can be given as a date/datetime object or text (in the `ISO 8601 `_ format). If the start date is in the past, the trigger will not fire many times retroactively but instead calculates the next run time from the current time, based on the past start time. Examples -------- :: from datetime import datetime from apscheduler.schedulers.blocking import BlockingScheduler def job_function(): print("Hello World") sched = BlockingScheduler() # Schedule job_function to be called every two hours sched.add_job(job_function, 'interval', hours=2) sched.start() You can use ``start_date`` and ``end_date`` to limit the total time in which the schedule runs:: # The same as before, but starts on 2010-10-10 at 9:30 and stops on 2014-06-15 at 11:00 sched.add_job(job_function, 'interval', hours=2, start_date='2010-10-10 09:30:00', end_date='2014-06-15 11:00:00') The :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator works nicely too:: from apscheduler.schedulers.blocking import BlockingScheduler sched = BlockingScheduler() @sched.scheduled_job('interval', id='my_job_id', hours=2) def job_function(): print("Hello World") The ``jitter`` option enables you to add a random component to the execution time. This might be useful if you have multiple servers and don't want them to run a job at the exact same moment or if you want to prevent multiple jobs with similar options from always running concurrently:: # Run job_function every hour with an extra delay picked randomly from a [-120, +120] second window sched.add_job(job_function, 'interval', hours=1, jitter=120) apscheduler-3.6.3/docs/py-modindex.rst000066400000000000000000000000341356022153700177420ustar00rootroot00000000000000API reference ============= apscheduler-3.6.3/docs/userguide.rst000066400000000000000000000460451356022153700175140ustar00rootroot00000000000000########## User guide ########## Installing APScheduler ---------------------- The preferred installation method is by using `pip `_:: $ pip install apscheduler If you don't have pip installed, you can easily install it by downloading and running `get-pip.py `_. If, for some reason, pip won't work, you can manually `download the APScheduler distribution `_ from PyPI, extract and then install it:: $ python setup.py install Code examples ------------- The source distribution contains the :file:`examples` directory where you can find many working examples for using APScheduler in different ways. The examples can also be `browsed online `_. Basic concepts -------------- APScheduler has four kinds of components: * triggers * job stores * executors * schedulers *Triggers* contain the scheduling logic. Each job has its own trigger which determines when the job should be run next. Beyond their initial configuration, triggers are completely stateless. *Job stores* house the scheduled jobs. The default job store simply keeps the jobs in memory, but others store them in various kinds of databases. A job's data is serialized when it is saved to a persistent job store, and deserialized when it's loaded back from it. Job stores (other than the default one) don't keep the job data in memory, but act as middlemen for saving, loading, updating and searching jobs in the backend. Job stores must never be shared between schedulers. 
*Executors* are what handle the running of the jobs. They do this typically by submitting the designated callable in a job to a thread or process pool. When the job is done, the executor notifies the scheduler which then emits an appropriate event. *Schedulers* are what bind the rest together. You typically have only one scheduler running in your application. The application developer doesn't normally deal with the job stores, executors or triggers directly. Instead, the scheduler provides the proper interface to handle all those. Configuring the job stores and executors is done through the scheduler, as is adding, modifying and removing jobs. Choosing the right scheduler, job store(s), executor(s) and trigger(s) ---------------------------------------------------------------------- Your choice of scheduler depends mostly on your programming environment and what you'll be using APScheduler for. Here's a quick guide for choosing a scheduler: * :class:`~apscheduler.schedulers.blocking.BlockingScheduler`: use when the scheduler is the only thing running in your process * :class:`~apscheduler.schedulers.background.BackgroundScheduler`: use when you're not using any of the frameworks below, and want the scheduler to run in the background inside your application * :class:`~apscheduler.schedulers.asyncio.AsyncIOScheduler`: use if your application uses the asyncio module * :class:`~apscheduler.schedulers.gevent.GeventScheduler`: use if your application uses gevent * :class:`~apscheduler.schedulers.tornado.TornadoScheduler`: use if you're building a Tornado application * :class:`~apscheduler.schedulers.twisted.TwistedScheduler`: use if you're building a Twisted application * :class:`~apscheduler.schedulers.qt.QtScheduler`: use if you're building a Qt application Simple enough, yes? To pick the appropriate job store, you need to determine whether you need job persistence or not. If you always recreate your jobs at the start of your application, then you can probably go with the default (:class:`~apscheduler.jobstores.memory.MemoryJobStore`). But if you need your jobs to persist over scheduler restarts or application crashes, then your choice usually boils down to what tools are used in your programming environment. If, however, you are in the position to choose freely, then :class:`~apscheduler.jobstores.sqlalchemy.SQLAlchemyJobStore` on a `PostgreSQL `_ backend is the recommended choice due to its strong data integrity protection. Likewise, the choice of executors is usually made for you if you use one of the frameworks above. Otherwise, the default :class:`~apscheduler.executors.pool.ThreadPoolExecutor` should be good enough for most purposes. If your workload involves CPU intensive operations, you should consider using :class:`~apscheduler.executors.pool.ProcessPoolExecutor` instead to make use of multiple CPU cores. You could even use both at once, adding the process pool executor as a secondary executor. When you schedule a job, you need to choose a *trigger* for it. The trigger determines the logic by which the dates/times are calculated when the job will be run. 
APScheduler comes with three built-in trigger types: * :mod:`~apscheduler.triggers.date`: use when you want to run the job just once at a certain point of time * :mod:`~apscheduler.triggers.interval`: use when you want to run the job at fixed intervals of time * :mod:`~apscheduler.triggers.cron`: use when you want to run the job periodically at certain time(s) of day It is also possible to combine multiple triggers into one which fires either on times agreed on by all the participating triggers, or when any of the triggers would fire. For more information, see the documentation for :mod:`combining triggers `. You can find the plugin names of each job store, executor and trigger type on their respective API documentation pages. .. _scheduler-config: Configuring the scheduler ------------------------- APScheduler provides many different ways to configure the scheduler. You can use a configuration dictionary or you can pass in the options as keyword arguments. You can also instantiate the scheduler first, add jobs and configure the scheduler afterwards. This way you get maximum flexibility for any environment. The full list of scheduler level configuration options can be found on the API reference of the :class:`~apscheduler.schedulers.base.BaseScheduler` class. Scheduler subclasses may also have additional options which are documented on their respective API references. Configuration options for individual job stores and executors can likewise be found on their API reference pages. Let's say you want to run BackgroundScheduler in your application with the default job store and the default executor:: from apscheduler.schedulers.background import BackgroundScheduler scheduler = BackgroundScheduler() # Initialize the rest of the application here, or before the scheduler initialization This will get you a BackgroundScheduler with a MemoryJobStore named "default" and a ThreadPoolExecutor named "default" with a default maximum thread count of 10. Now, suppose you want more. You want to have *two* job stores using *two* executors and you also want to tweak the default values for new jobs and set a different timezone. The following three examples are completely equivalent, and will get you: * a MongoDBJobStore named "mongo" * an SQLAlchemyJobStore named "default" (using SQLite) * a ThreadPoolExecutor named "default", with a worker count of 20 * a ProcessPoolExecutor named "processpool", with a worker count of 5 * UTC as the scheduler's timezone * coalescing turned off for new jobs by default * a default maximum instance limit of 3 for new jobs Method 1:: from pytz import utc from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.jobstores.mongodb import MongoDBJobStore from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor jobstores = { 'mongo': MongoDBJobStore(), 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite') } executors = { 'default': ThreadPoolExecutor(20), 'processpool': ProcessPoolExecutor(5) } job_defaults = { 'coalesce': False, 'max_instances': 3 } scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc) Method 2:: from apscheduler.schedulers.background import BackgroundScheduler # The "apscheduler." 
prefix is hard-coded scheduler = BackgroundScheduler({ 'apscheduler.jobstores.mongo': { 'type': 'mongodb' }, 'apscheduler.jobstores.default': { 'type': 'sqlalchemy', 'url': 'sqlite:///jobs.sqlite' }, 'apscheduler.executors.default': { 'class': 'apscheduler.executors.pool:ThreadPoolExecutor', 'max_workers': '20' }, 'apscheduler.executors.processpool': { 'type': 'processpool', 'max_workers': '5' }, 'apscheduler.job_defaults.coalesce': 'false', 'apscheduler.job_defaults.max_instances': '3', 'apscheduler.timezone': 'UTC', }) Method 3:: from pytz import utc from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from apscheduler.executors.pool import ProcessPoolExecutor jobstores = { 'mongo': {'type': 'mongodb'}, 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite') } executors = { 'default': {'type': 'threadpool', 'max_workers': 20}, 'processpool': ProcessPoolExecutor(max_workers=5) } job_defaults = { 'coalesce': False, 'max_instances': 3 } scheduler = BackgroundScheduler() # .. do something else here, maybe add jobs etc. scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc) Starting the scheduler ---------------------- Starting the scheduler is done by simply calling :meth:`~apscheduler.schedulers.base.BaseScheduler.start` on the scheduler. For schedulers other than :class:`~apscheduler.schedulers.blocking.BlockingScheduler`, this call will return immediately and you can continue the initialization process of your application, possibly adding jobs to the scheduler. For BlockingScheduler, you will only want to call :meth:`~apscheduler.schedulers.base.BaseScheduler.start` after you're done with any initialization steps. .. note:: After the scheduler has been started, you can no longer alter its settings. Adding jobs ----------- There are two ways to add jobs to a scheduler: #. by calling :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job` #. by decorating a function with :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` The first way is the most common way to do it. The second way is mostly a convenience to declare jobs that don't change during the application's run time. The :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job` method returns a :class:`apscheduler.job.Job` instance that you can use to modify or remove the job later. You can schedule jobs on the scheduler **at any time**. If the scheduler is not yet running when the job is added, the job will be scheduled *tentatively* and its first run time will only be computed when the scheduler starts. It is important to note that if you use an executor or job store that serializes the job, it will add a couple of requirements on your job: #. The target callable must be globally accessible #. Any arguments to the callable must be serializable Of the builtin job stores, only MemoryJobStore doesn't serialize jobs. Of the builtin executors, only ProcessPoolExecutor will serialize jobs. .. important:: If you schedule jobs in a persistent job store during your application's initialization, you **MUST** define an explicit ID for the job and use ``replace_existing=True`` or you will get a new copy of the job every time your application restarts! A sketch of this pattern is shown at the end of this section. .. tip:: To run a job immediately, omit the ``trigger`` argument when adding the job.
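A sketch of that pattern (``sync_data`` is a placeholder callable; the job ends up in whatever job store is configured as the default)::

    # Safe to call on every application startup: the explicit ID combined with
    # replace_existing=True updates the stored job instead of adding a duplicate
    scheduler.add_job(sync_data, 'interval', minutes=30, id='sync_data', replace_existing=True)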
Removing jobs ------------- When you remove a job from the scheduler, it is removed from its associated job store and will not be executed anymore. There are two ways to make this happen: #. by calling :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job` with the job's ID and job store alias #. by calling :meth:`~apscheduler.job.Job.remove` on the Job instance you got from :meth:`~apscheduler.schedulers.base.BaseScheduler.add_job` The latter method is probably more convenient, but it requires that you keep a reference to the :class:`~apscheduler.job.Job` instance you received when adding the job. For jobs scheduled via the :meth:`~apscheduler.schedulers.base.BaseScheduler.scheduled_job` decorator, the first way is the only way. If the job's schedule ends (i.e. its trigger doesn't produce any further run times), it is automatically removed. Example:: job = scheduler.add_job(myfunc, 'interval', minutes=2) job.remove() Same, using an explicit job ID:: scheduler.add_job(myfunc, 'interval', minutes=2, id='my_job_id') scheduler.remove_job('my_job_id') Pausing and resuming jobs ------------------------- You can easily pause and resume jobs through either the :class:`~apscheduler.job.Job` instance or the scheduler itself. When a job is paused, its next run time is cleared and no further run times will be calculated for it until the job is resumed. To pause a job, use either method: * :meth:`apscheduler.job.Job.pause` * :meth:`apscheduler.schedulers.base.BaseScheduler.pause_job` To resume: * :meth:`apscheduler.job.Job.resume` * :meth:`apscheduler.schedulers.base.BaseScheduler.resume_job` Getting a list of scheduled jobs -------------------------------- To get a machine-processable list of the scheduled jobs, you can use the :meth:`~apscheduler.schedulers.base.BaseScheduler.get_jobs` method. It will return a list of :class:`~apscheduler.job.Job` instances. If you're only interested in the jobs contained in a particular job store, then give a job store alias as the second argument. As a convenience, you can use the :meth:`~apscheduler.schedulers.base.BaseScheduler.print_jobs` method which will print out a formatted list of jobs, their triggers and next run times. Modifying jobs -------------- You can modify any job attribute, except for ``id``, by calling either :meth:`apscheduler.job.Job.modify` or :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`. Example:: job.modify(max_instances=6, name='Alternate name') If you want to reschedule the job -- that is, change its trigger -- you can use either :meth:`apscheduler.job.Job.reschedule` or :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`. These methods construct a new trigger for the job and recalculate its next run time based on the new trigger. Example:: scheduler.reschedule_job('my_job_id', trigger='cron', minute='*/5') Shutting down the scheduler --------------------------- To shut down the scheduler:: scheduler.shutdown() By default, the scheduler shuts down its job stores and executors and waits until all currently executing jobs are finished. If you don't want to wait, you can do:: scheduler.shutdown(wait=False) This will still shut down the job stores and executors but does not wait for any running tasks to complete.
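When a :class:`~apscheduler.schedulers.background.BackgroundScheduler` is used in a plain script, one common arrangement (a sketch of an application-level pattern, not part of the scheduler API) is to register the shutdown call to run at interpreter exit::

    import atexit

    from apscheduler.schedulers.background import BackgroundScheduler

    scheduler = BackgroundScheduler()
    scheduler.start()

    # Shut down executors and job stores cleanly when the process exits
    atexit.register(lambda: scheduler.shutdown(wait=False))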
Pausing/resuming job processing ------------------------------- It is possible to pause the processing of scheduled jobs:: scheduler.pause() This will cause the scheduler to not wake up until processing is resumed:: scheduler.resume() It is also possible to start the scheduler in a paused state, that is, without the first wakeup call:: scheduler.start(paused=True) This is useful when you need to prune unwanted jobs before they have a chance to run. Limiting the number of concurrently executing instances of a job ---------------------------------------------------------------- By default, only one instance of each job is allowed to be run at the same time. This means that if the job is about to be run but the previous run hasn't finished yet, then the latest run is considered a misfire. It is possible to set the maximum number of instances for a particular job that the scheduler will let run concurrently, by using the ``max_instances`` keyword argument when adding the job. .. _missed-job-executions: Missed job executions and coalescing ------------------------------------ Sometimes the scheduler may be unable to execute a scheduled job at the time it was scheduled to run. The most common case is when a job is scheduled in a persistent job store and the scheduler is shut down and restarted after the job was supposed to execute. When this happens, the job is considered to have "misfired". The scheduler will then check each missed execution time against the job's ``misfire_grace_time`` option (which can be set on a per-job basis or globally in the scheduler) to see if the execution should still be triggered. This can lead to the job being executed several times in succession. If this behavior is undesirable for your particular use case, it is possible to use `coalescing` to roll all these missed executions into one. In other words, if coalescing is enabled for the job and the scheduler sees one or more queued executions for the job, it will only trigger it once. No misfire events will be sent for the "bypassed" runs. .. note:: If the execution of a job is delayed due to no threads or processes being available in the pool, the executor may skip it due to it being run too late (compared to its originally designated run time). If this is likely to happen in your application, you may want to either increase the number of threads/processes in the executor, or adjust the ``misfire_grace_time`` setting to a higher value.
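Putting the options from the previous two sections together, a sketch (``my_job`` is a placeholder callable)::

    # Allow up to two overlapping runs, roll any queued missed runs into one,
    # and still fire a missed run if it is at most 30 seconds late
    scheduler.add_job(my_job, 'interval', minutes=1, max_instances=2,
                      coalesce=True, misfire_grace_time=30)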
.. _scheduler-events: Scheduler events ---------------- It is possible to attach event listeners to the scheduler. Scheduler events are fired on certain occasions, and may carry additional information in them concerning the details of that particular event. It is possible to listen to only particular types of events by giving the appropriate ``mask`` argument to :meth:`~apscheduler.schedulers.base.BaseScheduler.add_listener`, OR'ing the different constants together. The listener callable is called with one argument, the event object. See the documentation for the :mod:`~apscheduler.events` module for specifics on the available events and their attributes. Example:: from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED def my_listener(event): if event.exception: print('The job crashed :(') else: print('The job worked :)') scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR) .. _troubleshooting: Troubleshooting --------------- If the scheduler isn't working as expected, it will be helpful to increase the logging level of the ``apscheduler`` logger to the ``DEBUG`` level. If you do not yet have logging enabled in the first place, you can do this:: import logging logging.basicConfig() logging.getLogger('apscheduler').setLevel(logging.DEBUG) This should provide lots of useful information about what's going on inside the scheduler. Also make sure that you check the :doc:`faq` section to see if your problem already has a solution. Reporting bugs -------------- .. include:: ../README.rst :start-after: Reporting bugs -------------- apscheduler-3.6.3/docs/versionhistory.rst000066400000000000000000000302421356022153700206200ustar00rootroot00000000000000Version history =============== To find out how to migrate your application from a previous version of APScheduler, see the :doc:`migration section `. 3.6.3 ----- * Fixed Python 2.7 accidentally depending on the ``trollius`` package (regression from v3.6.2) 3.6.2 ----- * Fixed handling of :func:`~functools.partial` wrapped coroutine functions in ``AsyncIOExecutor`` and ``TornadoExecutor`` (PR by shipmints) 3.6.1 ----- * Fixed OverflowError on Qt scheduler when the wait time is very long * Fixed an issue where methods inherited from a base class could not be executed by the process pool executor (PR by Yang Jian) 3.6.0 ----- * Adapted ``RedisJobStore`` to v3.0 of the ``redis`` library * Adapted ``RethinkDBJobStore`` to v2.4 of the ``rethinkdb`` library * Fixed ``DeprecationWarnings`` about ``collections.abc`` on Python 3.7 (PR by Roman Levin) 3.5.3 ----- * Fixed regression introduced in 3.5.2: Class methods were mistaken for instance methods and thus were broken during serialization * Fixed callable name detection for methods in old style classes 3.5.2 ----- * Fixed scheduling of bound methods on persistent job stores (the workaround of scheduling ``YourClass.methodname`` along with an explicit ``self`` argument is no longer necessary as this is now done automatically for you) * Added the FAQ section to the docs * Made ``BaseScheduler.start()`` raise a ``RuntimeError`` if running under uWSGI with threads disabled 3.5.1 ----- * Fixed ``OverflowError`` on Windows when the wait time is too long * Fixed ``CronTrigger`` sometimes producing fire times beyond ``end_date`` when jitter is enabled (thanks to gilbsgilbs for the tests) * Fixed ISO 8601 UTC offset information being silently discarded from string formatted datetimes by adding support for parsing them 3.5.0 ----- * Added the ``engine_options`` option to ``SQLAlchemyJobStore`` * Added the ``jitter`` option to ``IntervalTrigger`` and ``CronTrigger`` (thanks to gilbsgilbs) * Added combining triggers (``AndTrigger`` and ``OrTrigger``) * Added better validation for the steps and ranges of different expressions in ``CronTrigger`` * Added support for named months (``jan`` – ``dec``) in ``CronTrigger`` month expressions * Added support for creating a ``CronTrigger`` from a crontab expression * Allowed spaces around commas in ``CronTrigger`` fields * Fixed memory leak due to a cyclic reference when jobs raise exceptions (thanks to gilbsgilbs for help on solving this) * Fixed passing ``wait=True`` to ``AsyncIOScheduler.shutdown()`` (although it doesn't do much) * Cancel all pending futures when ``AsyncIOExecutor`` is shut down 3.4.0 ----- * Dropped support for Python 3.3 * Added the ability to specify the table schema for ``SQLAlchemyJobStore`` (thanks to Meir Tseitlin) * Added a workaround for the ``ImportError`` when used with PyInstaller and the likes (caused by the missing packaging metadata when APScheduler is packaged with these tools) 3.3.1 ----- * Fixed Python 2.7 compatibility in 
``TornadoExecutor`` 3.3.0 ----- * The asyncio and Tornado schedulers can now run jobs targeting coroutine functions (requires Python 3.5; only native coroutines (``async def``) are supported) * The Tornado scheduler now uses TornadoExecutor as its default executor (see above for why) * Added ZooKeeper job store (thanks to Jose Ignacio Villar for the patch) * Fixed job store failure (``get_due_jobs()``) causing the scheduler main loop to exit (it now waits a configurable number of seconds before retrying) * Fixed ``@scheduled_job`` not working when serialization is required (persistent job stores and ``ProcessPoolExecutor``) * Improved import logic in ``ref_to_obj()`` to avoid errors in cases where traversing the path with ``getattr()`` would not work (thanks to Jarek Glowacki for the patch) * Fixed CronTrigger's weekday position expressions failing on Python 3 * Fixed CronTrigger's range expressions sometimes allowing values outside the given range 3.2.0 ----- * Added the ability to pause and unpause the scheduler * Fixed pickling problems with persistent jobs when upgrading from 3.0.x * Fixed AttributeError when importing apscheduler with setuptools < 11.0 * Fixed some events missing from ``apscheduler.events.__all__`` and ``apscheduler.events.EVENTS_ALL`` * Fixed wrong run time being set for date trigger when the timezone isn't the same as the local one * Fixed builtin ``id()`` erroneously used in MongoDBJobStore's ``JobLookupError()`` * Fixed endless loop with CronTrigger that may occur when the computer's clock resolution is too low (thanks to Jinping Bai for the patch) 3.1.0 ----- * Added RethinkDB job store (contributed by Allen Sanabria) * Added method chaining to the ``modify_job()``, ``reschedule_job()``, ``pause_job()`` and ``resume_job()`` methods in ``BaseScheduler`` and the corresponding methods in the ``Job`` class * Added the EVENT_JOB_SUBMITTED event that indicates a job has been submitted to its executor. 
* Added the EVENT_JOB_MAX_INSTANCES event that indicates a job's execution was skipped due to its maximum number of concurrently running instances being reached * Added the time zone to the repr() output of ``CronTrigger`` and ``IntervalTrigger`` * Fixed rare race condition on scheduler ``shutdown()`` * Dropped official support for CPython 2.6 and 3.2 and PyPy3 * Moved the connection logic in database backed job stores to the ``start()`` method * Migrated to setuptools_scm for versioning * Deprecated the various version related variables in the ``apscheduler`` module (``apscheduler.version_info``, ``apscheduler.version``, ``apscheduler.release``, ``apscheduler.__version__``) 3.0.6 ----- * Fixed bug in the cron trigger that produced off-by-1-hour datetimes when crossing the daylight saving threshold (thanks to Tim Strazny for reporting) 3.0.5 ----- * Fixed cron trigger always coalescing missed run times into a single run time (contributed by Chao Liu) * Fixed infinite loop in the cron trigger when an out-of-bounds value was given in an expression * Fixed debug logging displaying the next wakeup time in the UTC timezone instead of the scheduler's configured timezone * Allowed unicode function references in Python 2 3.0.4 ----- * Fixed memory leak in the base executor class (contributed by Stefan Nordhausen) 3.0.3 ----- * Fixed compatibility with pymongo 3.0 3.0.2 ----- * Fixed ValueError when the target callable has a default keyword argument that wasn't overridden * Fixed wrong job sort order in some job stores * Fixed exception when loading all jobs from the redis job store when there are paused jobs in it * Fixed AttributeError when printing a job list when there were pending jobs * Added setuptools as an explicit requirement in install requirements 3.0.1 ----- * A wider variety of target callables can now be scheduled so that the jobs are still serializable (static methods on Python 3.3+, unbound methods on all except Python 3.2) * Attempting to serialize a non-serializable Job now raises a helpful exception during serialization. Thanks to Jeremy Morgan for pointing this out. 
* Fixed table creation with SQLAlchemyJobStore on MySQL/InnoDB * Fixed start date getting set too far in the future with a timezone different from the local one * Fixed _run_job_error() being called with the incorrect number of arguments in most executors 3.0.0 ----- * Added support for timezones (special thanks to Curtis Vogt for help with this one) * Split the old Scheduler class into BlockingScheduler and BackgroundScheduler and added integration for asyncio (PEP 3156), Gevent, Tornado, Twisted and Qt event loops * Overhauled the job store system for much better scalability * Added the ability to modify, reschedule, pause and resume jobs * Dropped the Shelve job store because it could not work with the new job store system * Dropped the max_runs option and run counting of jobs since it could not be implemented reliably * Adding jobs is now done exclusively through ``add_job()`` -- the shortcuts to triggers were removed * Added the ``end_date`` parameter to cron and interval triggers * It is now possible to add a job directly to an executor without scheduling, by omitting the trigger argument * Replaced the thread pool with a pluggable executor system * Added support for running jobs in subprocesses (via the ``processpool`` executor) * Switched from nose to py.test for running unit tests 2.1.0 ----- * Added Redis job store * Added a "standalone" mode that runs the scheduler in the calling thread * Fixed disk synchronization in ShelveJobStore * Switched to PyPy 1.9 for PyPy compatibility testing * Dropped Python 2.4 support * Fixed SQLAlchemy 0.8 compatibility in SQLAlchemyJobStore * Various documentation improvements 2.0.3 ----- * The scheduler now closes the job store that is being removed, and all job stores on shutdown() by default * Added the ``last`` expression in the day field of CronTrigger (thanks rcaselli) * Raise a TypeError when fields with invalid names are passed to CronTrigger (thanks Christy O'Reilly) * Fixed the persistent.py example by shutting down the scheduler on Ctrl+C * Added PyPy 1.8 and CPython 3.3 to the test suite * Dropped PyPy 1.4 - 1.5 and CPython 3.1 from the test suite * Updated setup.cfg for compatibility with distutils2/packaging * Examples, documentation sources and unit tests are now packaged in the source distribution 2.0.2 ----- * Removed the unique constraint from the "name" column in the SQLAlchemy job store * Fixed output from Scheduler.print_jobs() which did not previously output a line ending at the end 2.0.1 ----- * Fixed cron style jobs getting wrong default values 2.0.0 ----- * Added configurable job stores with several persistent back-ends (shelve, SQLAlchemy and MongoDB) * Added the possibility to listen for job events (execution, error, misfire, finish) on a scheduler * Added an optional start time for cron-style jobs * Added optional job execution coalescing for situations where several executions of the job are due * Added an option to limit the maximum number of concurrently executing instances of the job * Allowed configuration of misfire grace times on a per-job basis * Allowed jobs to be explicitly named * All triggers now accept dates in string form (YYYY-mm-dd HH:MM:SS) * Jobs are now run in a thread pool; you can either supply your own PEP 3148 compliant thread pool or let APScheduler create its own * Maximum run count can be configured for all jobs, not just those using interval-based scheduling * Fixed a v1.x design flaw that caused jobs to be executed twice when the scheduler thread was woken up while still within the allowable 
range of their previous execution time (issues #5, #7) * Changed defaults for cron-style jobs to be more intuitive -- it will now default to all minimum values for fields lower than the least significant explicitly defined field 1.3.1 ----- * Fixed time difference calculation to take into account shifts to and from daylight saving time 1.3.0 ----- * Added __repr__() implementations to expressions, fields, triggers, and jobs to help with debugging * Added the dump_jobs method on Scheduler, which gives a helpful listing of all jobs scheduled on it * Fixed positional weekday (3rd fri etc.) expressions not working except in some edge cases (fixes #2) * Removed autogenerated API documentation for modules which are not part of the public API, as it might confuse some users .. Note:: Positional weekdays are now used with the **day** field, not **weekday**. 1.2.1 ----- * Fixed regression: add_cron_job() in Scheduler was creating a CronTrigger with the wrong parameters (fixes #1, #3) * Fixed: if the scheduler is restarted, clear the "stopped" flag to allow jobs to be scheduled again 1.2.0 ----- * Added the ``week`` option for cron schedules * Added the ``daemonic`` configuration option * Fixed a bug in cron expression lists that could cause valid firing times to be missed * Fixed unscheduling bound methods via unschedule_func() * Changed CronTrigger constructor argument names to match those in Scheduler 1.01 ---- * Fixed a corner case where the combination of hour and day_of_week parameters would cause incorrect timing for a cron trigger apscheduler-3.6.3/examples/000077500000000000000000000000001356022153700156445ustar00rootroot00000000000000apscheduler-3.6.3/examples/executors/000077500000000000000000000000001356022153700176655ustar00rootroot00000000000000apscheduler-3.6.3/examples/executors/processpool.py000066400000000000000000000011231356022153700226040ustar00rootroot00000000000000""" Demonstrates how to schedule a job to be run in a process pool on 3 second intervals. """ from datetime import datetime import os from apscheduler.schedulers.blocking import BlockingScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_executor('processpool') scheduler.add_job(tick, 'interval', seconds=3) print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/jobstores/000077500000000000000000000000001356022153700176565ustar00rootroot00000000000000apscheduler-3.6.3/examples/jobstores/mongodb.py000066400000000000000000000021621356022153700216560ustar00rootroot00000000000000""" This example demonstrates the use of the MongoDB job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. Running the example with the --clear switch will remove any existing alarms. """ from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' 
% time) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_jobstore('mongodb', collection='example_jobs') if len(sys.argv) > 1 and sys.argv[1] == '--clear': scheduler.remove_all_jobs() alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, run this example with the --clear argument.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/jobstores/redis_.py000066400000000000000000000022171356022153700214770ustar00rootroot00000000000000""" This example demonstrates the use of the Redis job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. Running the example with the --clear switch will remove any existing alarms. """ from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_jobstore('redis', jobs_key='example.jobs', run_times_key='example.run_times') if len(sys.argv) > 1 and sys.argv[1] == '--clear': scheduler.remove_all_jobs() alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, run this example with the --clear argument.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/jobstores/rethinkdb_.py000066400000000000000000000021731356022153700223440ustar00rootroot00000000000000""" This example demonstrates the use of the RethinkDB job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. Running the example with the --clear switch will remove any existing alarms. """ from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_jobstore('rethinkdb', database='apscheduler_example') if len(sys.argv) > 1 and sys.argv[1] == '--clear': scheduler.remove_all_jobs() alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, run this example with the --clear argument.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/jobstores/sqlalchemy_.py000066400000000000000000000021561356022153700225350ustar00rootroot00000000000000""" This example demonstrates the use of the SQLAlchemy job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. You can also give it the database URL as an argument. See the SQLAlchemy documentation on how to construct those. 
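For instance, with placeholder credentials, a PostgreSQL URL would look like postgresql://user:password@localhost/apscheduler_example; when no URL is given, the example falls back to a local SQLite file (example.sqlite).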
""" from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = BlockingScheduler() url = sys.argv[1] if len(sys.argv) > 1 else 'sqlite:///example.sqlite' scheduler.add_jobstore('sqlalchemy', url=url) alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, delete the example.sqlite file.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/jobstores/zookeeper.py000066400000000000000000000021611356022153700222330ustar00rootroot00000000000000""" This example demonstrates the use of the Zookeeper job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. Running the example with the --clear switch will remove any existing alarms. """ from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_jobstore('zookeeper', path='/example_jobs') if len(sys.argv) > 1 and sys.argv[1] == '--clear': scheduler.remove_all_jobs() alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, run this example with the --clear argument.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/misc/000077500000000000000000000000001356022153700165775ustar00rootroot00000000000000apscheduler-3.6.3/examples/misc/reference.py000066400000000000000000000007361356022153700211150ustar00rootroot00000000000000""" Basic example showing how to schedule a callable using a textual reference. """ import os from apscheduler.schedulers.blocking import BlockingScheduler if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick\n']) print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/rpc/000077500000000000000000000000001356022153700164305ustar00rootroot00000000000000apscheduler-3.6.3/examples/rpc/client.py000066400000000000000000000007421356022153700202630ustar00rootroot00000000000000""" This is an example RPC client that connects to the RPyC based scheduler service. It first connects to the RPyC server on localhost:12345. Then it schedules a job to run on 2 second intervals and sleeps for 10 seconds. After that, it unschedules the job and exits. """ from time import sleep import rpyc conn = rpyc.connect('localhost', 12345) job = conn.root.add_job('server:print_text', 'interval', args=['Hello, World'], seconds=2) sleep(10) conn.root.remove_job(job.id) apscheduler-3.6.3/examples/rpc/server.py000066400000000000000000000034401356022153700203110ustar00rootroot00000000000000""" This is an example showing how to make the scheduler into a remotely accessible service. 
It uses RPyC to set up a service through which the scheduler can be made to add, modify and remove jobs. To run, first install RPyC using pip. Then change the working directory to the ``rpc`` directory and run it with ``python -m server``. """ import rpyc from rpyc.utils.server import ThreadedServer from apscheduler.schedulers.background import BackgroundScheduler def print_text(text): print(text) class SchedulerService(rpyc.Service): def exposed_add_job(self, func, *args, **kwargs): return scheduler.add_job(func, *args, **kwargs) def exposed_modify_job(self, job_id, jobstore=None, **changes): return scheduler.modify_job(job_id, jobstore, **changes) def exposed_reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args): return scheduler.reschedule_job(job_id, jobstore, trigger, **trigger_args) def exposed_pause_job(self, job_id, jobstore=None): return scheduler.pause_job(job_id, jobstore) def exposed_resume_job(self, job_id, jobstore=None): return scheduler.resume_job(job_id, jobstore) def exposed_remove_job(self, job_id, jobstore=None): scheduler.remove_job(job_id, jobstore) def exposed_get_job(self, job_id): return scheduler.get_job(job_id) def exposed_get_jobs(self, jobstore=None): return scheduler.get_jobs(jobstore) if __name__ == '__main__': scheduler = BackgroundScheduler() scheduler.start() protocol_config = {'allow_public_attrs': True} server = ThreadedServer(SchedulerService, port=12345, protocol_config=protocol_config) try: server.start() except (KeyboardInterrupt, SystemExit): pass finally: scheduler.shutdown() apscheduler-3.6.3/examples/schedulers/000077500000000000000000000000001356022153700200055ustar00rootroot00000000000000apscheduler-3.6.3/examples/schedulers/asyncio_.py000066400000000000000000000014101356022153700221570ustar00rootroot00000000000000""" Demonstrates how to use the asyncio compatible scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import os from apscheduler.schedulers.asyncio import AsyncIOScheduler try: import asyncio except ImportError: import trollius as asyncio def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = AsyncIOScheduler() scheduler.add_job(tick, 'interval', seconds=3) scheduler.start() print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed. try: asyncio.get_event_loop().run_forever() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/schedulers/background.py000066400000000000000000000015001356022153700224720ustar00rootroot00000000000000""" Demonstrates how to use the background scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import time import os from apscheduler.schedulers.background import BackgroundScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = BackgroundScheduler() scheduler.add_job(tick, 'interval', seconds=3) scheduler.start() print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: # This is here to simulate application activity (which keeps the main thread alive). 
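# (BackgroundScheduler runs its thread as a daemon by default, so the interpreter would exit, taking the scheduler with it, if the main thread were allowed to return here.)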
while True: time.sleep(2) except (KeyboardInterrupt, SystemExit): # Not strictly necessary if daemonic mode is enabled but should be done if possible scheduler.shutdown() apscheduler-3.6.3/examples/schedulers/blocking.py000066400000000000000000000010711356022153700221460ustar00rootroot00000000000000""" Demonstrates how to use the blocking scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import os from apscheduler.schedulers.blocking import BlockingScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_job(tick, 'interval', seconds=3) print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/schedulers/gevent_.py000066400000000000000000000013201356022153700220020ustar00rootroot00000000000000""" Demonstrates how to use the gevent compatible scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import os from apscheduler.schedulers.gevent import GeventScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = GeventScheduler() scheduler.add_job(tick, 'interval', seconds=3) g = scheduler.start() # g is the greenlet that runs the scheduler loop print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed. try: g.join() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/schedulers/qt.py000066400000000000000000000020571356022153700210070ustar00rootroot00000000000000""" Demonstrates how to use the Qt compatible scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import signal import sys from apscheduler.schedulers.qt import QtScheduler try: from PyQt5.QtWidgets import QApplication, QLabel except ImportError: try: from PyQt4.QtGui import QApplication, QLabel except ImportError: from PySide.QtGui import QApplication, QLabel def tick(): label.setText('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': app = QApplication(sys.argv) # This enables processing of Ctrl+C keypresses signal.signal(signal.SIGINT, lambda *args: QApplication.quit()) label = QLabel('The timer text will appear here in a moment!') label.setWindowTitle('QtScheduler example') label.setFixedSize(280, 50) label.show() scheduler = QtScheduler() scheduler.add_job(tick, 'interval', seconds=3) scheduler.start() # Execution will block here until the user closes the windows or Ctrl+C is pressed. app.exec_() apscheduler-3.6.3/examples/schedulers/tornado_.py000066400000000000000000000013211356022153700221610ustar00rootroot00000000000000""" Demonstrates how to use the Tornado compatible scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import os from tornado.ioloop import IOLoop from apscheduler.schedulers.tornado import TornadoScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = TornadoScheduler() scheduler.add_job(tick, 'interval', seconds=3) scheduler.start() print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed. 
try: IOLoop.instance().start() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/examples/schedulers/twisted_.py000066400000000000000000000013101356022153700221740ustar00rootroot00000000000000""" Demonstrates how to use the Twisted compatible scheduler to schedule a job that executes on 3 second intervals. """ from datetime import datetime import os from twisted.internet import reactor from apscheduler.schedulers.twisted import TwistedScheduler def tick(): print('Tick! The time is: %s' % datetime.now()) if __name__ == '__main__': scheduler = TwistedScheduler() scheduler.add_job(tick, 'interval', seconds=3) scheduler.start() print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed. try: reactor.run() except (KeyboardInterrupt, SystemExit): pass apscheduler-3.6.3/setup.cfg000066400000000000000000000005121356022153700156450ustar00rootroot00000000000000[build_sphinx] source-dir = docs build-dir = docs/_build [upload_docs] upload-dir = docs/_build/html [tool:pytest] addopts = -rsx --cov --tb=short testpaths = tests [coverage:run] source = apscheduler [coverage:report] show_missing = true [flake8] max-line-length = 99 exclude = .tox,build,docs [bdist_wheel] universal = 1 apscheduler-3.6.3/setup.py000066400000000000000000000071441356022153700155460ustar00rootroot00000000000000# coding: utf-8 import os.path from setuptools import setup, find_packages here = os.path.dirname(__file__) readme_path = os.path.join(here, 'README.rst') readme = open(readme_path).read() setup( name='APScheduler', use_scm_version={ 'version_scheme': 'post-release', 'local_scheme': 'dirty-tag' }, description='In-process task scheduler with Cron-like capabilities', long_description=readme, author=u'Alex Grönholm', author_email='apscheduler@nextday.fi', url='https://github.com/agronholm/apscheduler', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7' ], keywords='scheduling cron', license='MIT', packages=find_packages(exclude=['tests']), setup_requires=[ 'setuptools_scm' ], install_requires=[ 'setuptools >= 0.7', 'six >= 1.4.0', 'pytz', 'tzlocal >= 1.2', ], extras_require={ ':python_version == "2.7"': ['futures', 'funcsigs'], 'asyncio:python_version == "2.7"': ['trollius'], 'gevent': ['gevent'], 'mongodb': ['pymongo >= 2.8'], 'redis': ['redis >= 3.0'], 'rethinkdb': ['rethinkdb >= 2.4.0'], 'sqlalchemy': ['sqlalchemy >= 0.8'], 'tornado': ['tornado >= 4.3'], 'twisted': ['twisted'], 'zookeeper': ['kazoo'], 'testing': [ 'pytest', 'pytest-cov', 'pytest-tornado5' ], 'testing:python_version == "2.7"': ['mock'], 'testing:python_version == "3.4"': ['pytest_asyncio < 0.6'], 'testing:python_version >= "3.5"': ['pytest_asyncio'], 'doc': [ 'sphinx', 'sphinx-rtd-theme', ], }, zip_safe=False, entry_points={ 'apscheduler.triggers': [ 'date = apscheduler.triggers.date:DateTrigger', 'interval = apscheduler.triggers.interval:IntervalTrigger', 'cron = apscheduler.triggers.cron:CronTrigger', 'and = apscheduler.triggers.combining:AndTrigger', 'or = apscheduler.triggers.combining:OrTrigger' ], 'apscheduler.executors': [ 'debug = apscheduler.executors.debug:DebugExecutor', 'threadpool = 
apscheduler.executors.pool:ThreadPoolExecutor', 'processpool = apscheduler.executors.pool:ProcessPoolExecutor', 'asyncio = apscheduler.executors.asyncio:AsyncIOExecutor [asyncio]', 'gevent = apscheduler.executors.gevent:GeventExecutor [gevent]', 'tornado = apscheduler.executors.tornado:TornadoExecutor [tornado]', 'twisted = apscheduler.executors.twisted:TwistedExecutor [twisted]' ], 'apscheduler.jobstores': [ 'memory = apscheduler.jobstores.memory:MemoryJobStore', 'sqlalchemy = apscheduler.jobstores.sqlalchemy:SQLAlchemyJobStore [sqlalchemy]', 'mongodb = apscheduler.jobstores.mongodb:MongoDBJobStore [mongodb]', 'rethinkdb = apscheduler.jobstores.rethinkdb:RethinkDBJobStore [rethinkdb]', 'redis = apscheduler.jobstores.redis:RedisJobStore [redis]', 'zookeeper = apscheduler.jobstores.zookeeper:ZooKeeperJobStore [zookeeper]' ] } ) apscheduler-3.6.3/tests/000077500000000000000000000000001356022153700151705ustar00rootroot00000000000000apscheduler-3.6.3/tests/__init__.py000066400000000000000000000000001356022153700172670ustar00rootroot00000000000000apscheduler-3.6.3/tests/conftest.py000066400000000000000000000063711356022153700173760ustar00rootroot00000000000000# coding: utf-8 from datetime import datetime import sys import pytest import pytz from apscheduler.job import Job from apscheduler.schedulers.base import BaseScheduler from apscheduler.schedulers.blocking import BlockingScheduler try: from unittest.mock import Mock except ImportError: from mock import Mock def pytest_ignore_collect(path, config): return path.basename.endswith('_py35.py') and sys.version_info < (3, 5) def minpython(*version): version_str = '.'.join([str(num) for num in version]) def outer(func): dec = pytest.mark.skipif(sys.version_info < version, reason='Requires Python >= %s' % version_str) return dec(func) return outer def maxpython(*version): version_str = '.'.join([str(num) for num in version]) def outer(func): dec = pytest.mark.skipif(sys.version_info >= version, reason='Requires Python < %s' % version_str) return dec(func) return outer @pytest.fixture def timezone(monkeypatch): tz = pytz.timezone('Europe/Berlin') monkeypatch.setattr('apscheduler.schedulers.base.get_localzone', Mock(return_value=tz)) return tz @pytest.fixture def freeze_time(monkeypatch, timezone): class TimeFreezer: def __init__(self, initial): self.current = initial self.increment = None def get(self, tzinfo=None): now = self.current.astimezone(tzinfo) if tzinfo else self.current.replace(tzinfo=None) if self.increment: self.current += self.increment return now def set(self, new_time): self.current = new_time def next(self,): return self.current + self.increment def set_increment(self, delta): self.increment = delta freezer = TimeFreezer(timezone.localize(datetime(2011, 4, 3, 18, 40))) fake_datetime = Mock(datetime, now=freezer.get) monkeypatch.setattr('apscheduler.schedulers.base.datetime', fake_datetime) monkeypatch.setattr('apscheduler.executors.base.datetime', fake_datetime) monkeypatch.setattr('apscheduler.triggers.interval.datetime', fake_datetime) monkeypatch.setattr('apscheduler.triggers.date.datetime', fake_datetime) return freezer @pytest.fixture def job_defaults(timezone): run_date = timezone.localize(datetime(2011, 4, 3, 18, 40)) return {'trigger': 'date', 'trigger_args': {'run_date': run_date, 'timezone': timezone}, 'executor': 'default', 'args': (), 'kwargs': {}, 'id': b't\xc3\xa9st\xc3\xafd'.decode('utf-8'), 'misfire_grace_time': 1, 'coalesce': False, 'name': b'n\xc3\xa4m\xc3\xa9'.decode('utf-8'), 'max_instances': 1} @pytest.fixture 
def create_job(job_defaults, timezone): def create(**kwargs): kwargs.setdefault('scheduler', Mock(BaseScheduler, timezone=timezone)) job_kwargs = job_defaults.copy() job_kwargs.update(kwargs) job_kwargs['trigger'] = BlockingScheduler()._create_trigger(job_kwargs.pop('trigger'), job_kwargs.pop('trigger_args')) job_kwargs.setdefault('next_run_time', None) return Job(**job_kwargs) return create apscheduler-3.6.3/tests/test_executors.py000066400000000000000000000106411356022153700206240ustar00rootroot00000000000000from datetime import datetime from threading import Event from types import TracebackType import gc import time import pytest from pytz import UTC from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED, EVENT_JOB_EXECUTED from apscheduler.executors.base import MaxInstancesReachedError, run_job from apscheduler.job import Job from apscheduler.schedulers.base import BaseScheduler try: from unittest.mock import Mock, MagicMock, patch except ImportError: from mock import Mock, MagicMock, patch @pytest.fixture def mock_scheduler(timezone): scheduler_ = Mock(BaseScheduler, timezone=timezone) scheduler_._create_lock = MagicMock() return scheduler_ @pytest.fixture(params=['threadpool', 'processpool']) def executor(request, mock_scheduler): if request.param == 'threadpool': from apscheduler.executors.pool import ThreadPoolExecutor executor_ = ThreadPoolExecutor() else: from apscheduler.executors.pool import ProcessPoolExecutor executor_ = ProcessPoolExecutor() executor_.start(mock_scheduler, 'dummy') yield executor_ executor_.shutdown() def wait_event(): time.sleep(0.2) return 'test' def failure(): raise Exception('test failure') def success(): return 5 def test_max_instances(mock_scheduler, executor, create_job, freeze_time): """Tests that the maximum instance limit on a job is respected.""" events = [] mock_scheduler._dispatch_event = lambda event: events.append(event) job = create_job(func=wait_event, max_instances=2, next_run_time=None) executor.submit_job(job, [freeze_time.current]) executor.submit_job(job, [freeze_time.current]) pytest.raises(MaxInstancesReachedError, executor.submit_job, job, [freeze_time.current]) executor.shutdown() assert len(events) == 2 assert events[0].retval == 'test' assert events[1].retval == 'test' @pytest.mark.parametrize('event_code,func', [ (EVENT_JOB_EXECUTED, success), (EVENT_JOB_MISSED, failure), (EVENT_JOB_ERROR, failure) ], ids=['executed', 'missed', 'error']) def test_submit_job(mock_scheduler, executor, create_job, freeze_time, timezone, event_code, func): """ Tests that an EVENT_JOB_EXECUTED event is delivered to the scheduler if the job was successfully executed. 
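(The 'missed' and 'error' parametrizations above likewise check that EVENT_JOB_MISSED and EVENT_JOB_ERROR events are delivered.)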
""" mock_scheduler._dispatch_event = MagicMock() job = create_job(func=func, id='foo') job._jobstore_alias = 'test_jobstore' run_time = (timezone.localize(datetime(1970, 1, 1)) if event_code == EVENT_JOB_MISSED else freeze_time.current) executor.submit_job(job, [run_time]) executor.shutdown() assert mock_scheduler._dispatch_event.call_count == 1 event = mock_scheduler._dispatch_event.call_args[0][0] assert event.code == event_code assert event.job_id == 'foo' assert event.jobstore == 'test_jobstore' if event_code == EVENT_JOB_EXECUTED: assert event.retval == 5 elif event_code == EVENT_JOB_ERROR: assert str(event.exception) == 'test failure' assert isinstance(event.traceback, str) class FauxJob(object): id = 'abc' max_instances = 1 _jobstore_alias = 'foo' def dummy_run_job(job, jobstore_alias, run_times, logger_name): raise Exception('dummy') def test_run_job_error(monkeypatch, executor): """Tests that _run_job_error is properly called if an exception is raised in run_job()""" def run_job_error(job_id, exc, traceback): assert job_id == 'abc' exc_traceback[:] = [exc, traceback] event.set() event = Event() exc_traceback = [None, None] monkeypatch.setattr('apscheduler.executors.base.run_job', dummy_run_job) monkeypatch.setattr('apscheduler.executors.pool.run_job', dummy_run_job) monkeypatch.setattr(executor, '_run_job_error', run_job_error) executor.submit_job(FauxJob(), []) event.wait(5) assert str(exc_traceback[0]) == "dummy" if exc_traceback[1] is not None: assert isinstance(exc_traceback[1], TracebackType) def test_run_job_memory_leak(): class FooBar(object): pass def func(): foo = FooBar() # noqa: F841 raise Exception('dummy') fake_job = Mock(Job, func=func, args=(), kwargs={}, misfire_grace_time=1) with patch('logging.getLogger'): for _ in range(5): run_job(fake_job, 'foo', [datetime.now(UTC)], __name__) foos = [x for x in gc.get_objects() if type(x) is FooBar] assert len(foos) == 0 apscheduler-3.6.3/tests/test_executors_py35.py000066400000000000000000000063721356022153700215120ustar00rootroot00000000000000"""Contains test functions using Python 3.3+ syntax.""" from asyncio import CancelledError from datetime import datetime import pytest from apscheduler.executors.asyncio import AsyncIOExecutor from apscheduler.executors.tornado import TornadoExecutor from apscheduler.schedulers.asyncio import AsyncIOScheduler from apscheduler.schedulers.tornado import TornadoScheduler from pytz import utc @pytest.fixture def asyncio_scheduler(event_loop): scheduler = AsyncIOScheduler(event_loop=event_loop) scheduler.start(paused=True) yield scheduler scheduler.shutdown(False) @pytest.fixture def asyncio_executor(asyncio_scheduler): executor = AsyncIOExecutor() executor.start(asyncio_scheduler, 'default') yield executor executor.shutdown() @pytest.fixture def tornado_scheduler(io_loop): scheduler = TornadoScheduler(io_loop=io_loop) scheduler.start(paused=True) yield scheduler scheduler.shutdown(False) @pytest.fixture def tornado_executor(tornado_scheduler): executor = TornadoExecutor() executor.start(tornado_scheduler, 'default') yield executor executor.shutdown() async def waiter(sleep, exception): await sleep(0.1) if exception: raise Exception('dummy error') else: return True @pytest.mark.parametrize('exception', [False, True]) @pytest.mark.asyncio async def test_run_coroutine_job(asyncio_scheduler, asyncio_executor, exception): from asyncio import Future, sleep future = Future() job = asyncio_scheduler.add_job(waiter, 'interval', seconds=1, args=[sleep, exception]) asyncio_executor._run_job_success = 
lambda job_id, events: future.set_result(events) asyncio_executor._run_job_error = lambda job_id, exc, tb: future.set_exception(exc) asyncio_executor.submit_job(job, [datetime.now(utc)]) events = await future assert len(events) == 1 if exception: assert str(events[0].exception) == 'dummy error' else: assert events[0].retval is True @pytest.mark.parametrize('exception', [False, True]) @pytest.mark.gen_test async def test_run_coroutine_job_tornado(tornado_scheduler, tornado_executor, exception): from tornado.concurrent import Future from tornado.gen import sleep future = Future() job = tornado_scheduler.add_job(waiter, 'interval', seconds=1, args=[sleep, exception]) tornado_executor._run_job_success = lambda job_id, events: future.set_result(events) tornado_executor._run_job_error = lambda job_id, exc, tb: future.set_exception(exc) tornado_executor.submit_job(job, [datetime.now(utc)]) events = await future assert len(events) == 1 if exception: assert str(events[0].exception) == 'dummy error' else: assert events[0].retval is True @pytest.mark.asyncio async def test_asyncio_executor_shutdown(asyncio_scheduler, asyncio_executor): """Test that the AsyncIO executor cancels its pending tasks on shutdown.""" from asyncio import sleep job = asyncio_scheduler.add_job(waiter, 'interval', seconds=1, args=[sleep, None]) asyncio_executor.submit_job(job, [datetime.now(utc)]) futures = asyncio_executor._pending_futures.copy() assert len(futures) == 1 asyncio_executor.shutdown() with pytest.raises(CancelledError): await futures.pop() apscheduler-3.6.3/tests/test_expressions.py000066400000000000000000000120011356022153700211550ustar00rootroot00000000000000from datetime import datetime import pytest from apscheduler.triggers.cron.fields import DayOfMonthField, BaseField, DayOfWeekField from apscheduler.triggers.cron.expressions import ( AllExpression, RangeExpression, WeekdayPositionExpression, WeekdayRangeExpression, LastDayOfMonthExpression) def test_all_expression(): field = DayOfMonthField('day', '*') assert repr(field) == "DayOfMonthField('day', '*')" date = datetime(2009, 7, 1) assert field.get_next_value(date) == 1 date = datetime(2009, 7, 10) assert field.get_next_value(date) == 10 date = datetime(2009, 7, 30) assert field.get_next_value(date) == 30 def test_all_expression_step(): field = BaseField('hour', '*/3') assert repr(field) == "BaseField('hour', '*/3')" date = datetime(2009, 7, 1, 0) assert field.get_next_value(date) == 0 date = datetime(2009, 7, 1, 2) assert field.get_next_value(date) == 3 date = datetime(2009, 7, 1, 7) assert field.get_next_value(date) == 9 def test_all_expression_invalid(): pytest.raises(ValueError, BaseField, 'hour', '*/0') def test_all_expression_repr(): expr = AllExpression() assert repr(expr) == 'AllExpression(None)' def test_all_expression_step_repr(): expr = AllExpression(2) assert repr(expr) == "AllExpression(2)" def test_range_expression(): field = DayOfMonthField('day', '2-9') assert repr(field) == "DayOfMonthField('day', '2-9')" date = datetime(2009, 7, 1) assert field.get_next_value(date) == 2 date = datetime(2009, 7, 10) assert field.get_next_value(date) is None date = datetime(2009, 7, 5) assert field.get_next_value(date) == 5 def test_range_expression_step(): field = DayOfMonthField('day', '2-9/3') assert repr(field) == "DayOfMonthField('day', '2-9/3')" date = datetime(2009, 7, 1) assert field.get_next_value(date) == 2 date = datetime(2009, 7, 3) assert field.get_next_value(date) == 5 date = datetime(2009, 7, 9) assert field.get_next_value(date) is None def 
test_range_expression_single(): field = DayOfMonthField('day', 9) assert repr(field) == "DayOfMonthField('day', '9')" date = datetime(2009, 7, 1) assert field.get_next_value(date) == 9 date = datetime(2009, 7, 9) assert field.get_next_value(date) == 9 date = datetime(2009, 7, 10) assert field.get_next_value(date) is None def test_range_expression_invalid(): pytest.raises(ValueError, DayOfMonthField, 'day', '5-3') def test_range_expression_repr(): expr = RangeExpression(3, 7) assert repr(expr) == 'RangeExpression(3, 7)' def test_range_expression_single_repr(): expr = RangeExpression(4) assert repr(expr) == 'RangeExpression(4)' def test_range_expression_step_repr(): expr = RangeExpression(3, 7, 2) assert repr(expr) == 'RangeExpression(3, 7, 2)' def test_weekday_single(): field = DayOfWeekField('day_of_week', 'WED') assert repr(field) == "DayOfWeekField('day_of_week', 'wed')" date = datetime(2008, 2, 4) assert field.get_next_value(date) == 2 def test_weekday_range(): field = DayOfWeekField('day_of_week', 'TUE-SAT') assert repr(field) == "DayOfWeekField('day_of_week', 'tue-sat')" date = datetime(2008, 2, 7) assert field.get_next_value(date) == 3 def test_weekday_pos_1(): expr = WeekdayPositionExpression('1st', 'Fri') assert str(expr) == '1st fri' date = datetime(2008, 2, 1) assert expr.get_next_value(date, 'day') == 1 def test_weekday_pos_2(): expr = WeekdayPositionExpression('2nd', 'wed') assert str(expr) == '2nd wed' date = datetime(2008, 2, 1) assert expr.get_next_value(date, 'day') == 13 def test_weekday_pos_3(): expr = WeekdayPositionExpression('last', 'fri') assert str(expr) == 'last fri' date = datetime(2008, 2, 1) assert expr.get_next_value(date, 'day') == 29 def test_day_of_week_invalid_pos(): pytest.raises(ValueError, WeekdayPositionExpression, '6th', 'fri') def test_day_of_week_invalid_name(): pytest.raises(ValueError, WeekdayPositionExpression, '1st', 'moh') def test_weekday_position_expression_repr(): expr = WeekdayPositionExpression('2nd', 'FRI') assert repr(expr) == "WeekdayPositionExpression('2nd', 'fri')" def test_day_of_week_invalid_first(): pytest.raises(ValueError, WeekdayRangeExpression, 'moh', 'fri') def test_day_of_week_invalid_last(): pytest.raises(ValueError, WeekdayRangeExpression, 'mon', 'fre') def test_weekday_range_expression_repr(): expr = WeekdayRangeExpression('tue', 'SUN') assert repr(expr) == "WeekdayRangeExpression('tue', 'sun')" def test_weekday_range_expression_single_repr(): expr = WeekdayRangeExpression('thu') assert repr(expr) == "WeekdayRangeExpression('thu')" def test_last_day_of_month_expression(): expr = LastDayOfMonthExpression() date = datetime(2012, 2, 1) assert expr.get_next_value(date, 'day') == 29 def test_last_day_of_month_expression_invalid(): expr = LastDayOfMonthExpression() assert repr(expr) == "LastDayOfMonthExpression()" apscheduler-3.6.3/tests/test_job.py000066400000000000000000000225671356022153700173670ustar00rootroot00000000000000# coding: utf-8 from datetime import datetime, timedelta from functools import partial import pytest import six from apscheduler.job import Job from apscheduler.schedulers.base import BaseScheduler from apscheduler.triggers.date import DateTrigger try: from unittest.mock import MagicMock, patch except ImportError: from mock import MagicMock, patch def dummyfunc(): pass @pytest.fixture def job(create_job): return create_job(func=dummyfunc) @pytest.mark.parametrize('job_id', ['testid', None]) def test_constructor(job_id): with patch('apscheduler.job.Job._modify') as _modify: scheduler_mock = 
MagicMock(BaseScheduler) job = Job(scheduler_mock, id=job_id) assert job._scheduler is scheduler_mock assert job._jobstore_alias is None modify_kwargs = _modify.call_args[1] if job_id is None: assert len(modify_kwargs['id']) == 32 else: assert modify_kwargs['id'] == job_id def test_modify(job): job.modify(bah=1, foo='x') job._scheduler.modify_job.assert_called_once_with(job.id, None, bah=1, foo='x') def test_reschedule(job): job.reschedule('trigger', bah=1, foo='x') job._scheduler.reschedule_job.assert_called_once_with(job.id, None, 'trigger', bah=1, foo='x') def test_pause(job): job.pause() job._scheduler.pause_job.assert_called_once_with(job.id, None) def test_resume(job): job.resume() job._scheduler.resume_job.assert_called_once_with(job.id, None) def test_remove(job): job.remove() job._scheduler.remove_job.assert_called_once_with(job.id, None) def test_pending(job): """ Tests that the "pending" property returns ``True`` when _jobstore_alias is ``None`` (the job has not yet been added to a job store) and ``False`` when it is a string. """ assert job.pending job._jobstore_alias = 'test' assert not job.pending def test_get_run_times(create_job, timezone): run_time = timezone.localize(datetime(2010, 12, 13, 0, 8)) expected_times = [run_time + timedelta(seconds=1), run_time + timedelta(seconds=2)] job = create_job(trigger='interval', trigger_args={'seconds': 1, 'timezone': timezone, 'start_date': run_time}, next_run_time=expected_times[0], func=dummyfunc) run_times = job._get_run_times(run_time) assert run_times == [] run_times = job._get_run_times(expected_times[0]) assert run_times == [expected_times[0]] run_times = job._get_run_times(expected_times[1]) assert run_times == expected_times def test_private_modify_bad_id(job): """Tests that only strings are accepted for job IDs.""" del job.id exc = pytest.raises(TypeError, job._modify, id=3) assert str(exc.value) == 'id must be a nonempty string' def test_private_modify_id(job): """Tests that the job ID can't be changed.""" exc = pytest.raises(ValueError, job._modify, id='alternate') assert str(exc.value) == 'The job ID may not be changed' def test_private_modify_bad_func(job): """Tests that passing a func that is neither a callable nor a string raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, func=1) assert str(exc.value) == 'func must be a callable or a textual reference to one' def test_private_modify_func_ref(job): """Tests that the target callable can be given as a textual reference.""" job._modify(func='tests.test_job:dummyfunc') assert job.func is dummyfunc assert job.func_ref == 'tests.test_job:dummyfunc' def test_private_modify_unreachable_func(job): """Tests that func_ref remains None if no reference to the target callable can be found.""" func = partial(dummyfunc) job._modify(func=func) assert job.func is func assert job.func_ref is None def test_private_modify_update_name(job): """Tests that the name attribute defaults to the function name.""" del job.name job._modify(func=dummyfunc) assert job.name == 'dummyfunc' def test_private_modify_bad_args(job): """Tests that passing an argument list of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, args=1) assert str(exc.value) == 'args must be a non-string iterable' def test_private_modify_bad_kwargs(job): """Tests that passing keyword arguments of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, kwargs=1) assert str(exc.value) == 'kwargs must be a dict-like object' @pytest.mark.parametrize('value', [1, ''], ids=['integer', 'empty string']) def 
test_private_modify_bad_name(job, value): """ Tests that passing an empty name or a name that is not a string raises a TypeError. """ exc = pytest.raises(TypeError, job._modify, name=value) assert str(exc.value) == 'name must be a nonempty string' @pytest.mark.parametrize('value', ['foo', 0, -1], ids=['string', 'zero', 'negative']) def test_private_modify_bad_misfire_grace_time(job, value): """Tests that passing a misfire_grace_time of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, misfire_grace_time=value) assert str(exc.value) == 'misfire_grace_time must be either None or a positive integer' @pytest.mark.parametrize('value', [None, 'foo', 0, -1], ids=['None', 'string', 'zero', 'negative']) def test_private_modify_bad_max_instances(job, value): """Tests that passing a max_instances of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, max_instances=value) assert str(exc.value) == 'max_instances must be a positive integer' def test_private_modify_bad_trigger(job): """Tests that passing a trigger of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, trigger='foo') assert str(exc.value) == 'Expected a trigger instance, got str instead' def test_private_modify_bad_executor(job): """Tests that passing an executor of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, executor=1) assert str(exc.value) == 'executor must be a string' def test_private_modify_bad_next_run_time(job): """Tests that passing a next_run_time of the wrong type raises a TypeError.""" exc = pytest.raises(TypeError, job._modify, next_run_time=1) assert str(exc.value) == 'Unsupported type for next_run_time: int' def test_private_modify_bad_argument(job): """Tests that passing an unmodifiable argument type raises an AttributeError.""" exc = pytest.raises(AttributeError, job._modify, scheduler=1) assert str(exc.value) == 'The following are not modifiable attributes of Job: scheduler' def test_getstate(job): state = job.__getstate__() assert state == dict( version=1, trigger=job.trigger, executor='default', func='tests.test_job:dummyfunc', name=b'n\xc3\xa4m\xc3\xa9'.decode('utf-8'), args=(), kwargs={}, id=b't\xc3\xa9st\xc3\xafd'.decode('utf-8'), misfire_grace_time=1, coalesce=False, max_instances=1, next_run_time=None) def test_setstate(job, timezone): trigger = DateTrigger('2010-12-14 13:05:00', timezone) state = dict( version=1, scheduler=MagicMock(), jobstore=MagicMock(), trigger=trigger, executor='dummyexecutor', func='tests.test_job:dummyfunc', name='testjob.dummyfunc', args=[], kwargs={}, id='other_id', misfire_grace_time=2, coalesce=True, max_instances=2, next_run_time=None) job.__setstate__(state) assert job.id == 'other_id' assert job.func == dummyfunc assert job.func_ref == 'tests.test_job:dummyfunc' assert job.trigger == trigger assert job.executor == 'dummyexecutor' assert job.args == [] assert job.kwargs == {} assert job.name == 'testjob.dummyfunc' assert job.misfire_grace_time == 2 assert job.coalesce is True assert job.max_instances == 2 assert job.next_run_time is None def test_setstate_bad_version(job): """Tests that __setstate__ rejects state of a higher version than it was designed to handle.""" exc = pytest.raises(ValueError, job.__setstate__, {'version': 9999}) assert 'Job has version 9999, but only version' in str(exc.value) def test_eq(create_job): job = create_job(func=lambda: None, id='foo') job2 = create_job(func=lambda: None, id='foo') job3 = create_job(func=lambda: 
None, id='bar') assert job == job2 assert not job == job3 assert not job == 'foo' def test_repr(job): if six.PY2: assert repr(job) == '<Job (id=t\\xe9st\\xefd name=n\\xe4m\\xe9)>' else: assert repr(job) == \ b'<Job (id=t\xc3\xa9st\xc3\xafd name=n\xc3\xa4m\xc3\xa9)>'.decode('utf-8') @pytest.mark.parametrize('status, expected_status', [ ('scheduled', 'next run at: 2011-04-03 18:40:00 CEST'), ('paused', 'paused'), ('pending', 'pending') ], ids=['scheduled', 'paused', 'pending']) @pytest.mark.parametrize('unicode', [False, True], ids=['nativestr', 'unicode']) def test_str(create_job, status, unicode, expected_status): job = create_job(func=dummyfunc) if status == 'scheduled': job.next_run_time = job.trigger.run_date elif status == 'pending': del job.next_run_time if six.PY2 and not unicode: expected = 'n\\xe4m\\xe9 (trigger: date[2011-04-03 18:40:00 CEST], %s)' % expected_status else: expected = b'n\xc3\xa4m\xc3\xa9 (trigger: date[2011-04-03 18:40:00 CEST], %s)'.\ decode('utf-8') % expected_status result = job.__unicode__() if unicode else job.__str__() assert result == expected apscheduler-3.6.3/tests/test_jobstores.py000066400000000000000000000325521356022153700206200ustar00rootroot00000000000000from datetime import datetime import pytest from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.jobstores.base import JobLookupError, ConflictingIdError def dummy_job(): pass def dummy_job2(): pass def dummy_job3(): pass class DummyClass: def dummy_method(self, a, b): return a + b @classmethod def dummy_classmethod(cls, a, b): return a + b @pytest.fixture def memjobstore(): yield MemoryJobStore() @pytest.fixture def sqlalchemyjobstore(tmpdir): db_path = tmpdir.join('apscheduler_unittest.sqlite') sqlalchemy = pytest.importorskip('apscheduler.jobstores.sqlalchemy') store = sqlalchemy.SQLAlchemyJobStore(url='sqlite:///%s' % db_path) store.start(None, 'sqlalchemy') yield store store.shutdown() db_path.remove() @pytest.fixture def rethinkdbjobstore(): rethinkdb = pytest.importorskip('apscheduler.jobstores.rethinkdb') store = rethinkdb.RethinkDBJobStore(database='apscheduler_unittest') store.start(None, 'rethinkdb') yield store store.r.db_drop('apscheduler_unittest').run(store.conn) store.shutdown() @pytest.fixture def mongodbjobstore(): mongodb = pytest.importorskip('apscheduler.jobstores.mongodb') store = mongodb.MongoDBJobStore(database='apscheduler_unittest') store.start(None, 'mongodb') yield store store.client.drop_database(store.collection.database.name) store.shutdown() @pytest.fixture def redisjobstore(): redis = pytest.importorskip('apscheduler.jobstores.redis') store = redis.RedisJobStore() store.start(None, 'redis') yield store store.remove_all_jobs() store.shutdown() @pytest.fixture def zookeeperjobstore(): zookeeper = pytest.importorskip('apscheduler.jobstores.zookeeper') store = zookeeper.ZooKeeperJobStore(path='/apscheduler_unittest') store.start(None, 'zookeeper') yield store store.remove_all_jobs() store.shutdown() @pytest.fixture(params=['memjobstore', 'sqlalchemyjobstore', 'mongodbjobstore', 'redisjobstore', 'rethinkdbjobstore', 'zookeeperjobstore'], ids=['memory', 'sqlalchemy', 'mongodb', 'redis', 'rethinkdb', 'zookeeper']) def jobstore(request): return request.getfixturevalue(request.param) @pytest.fixture(params=['sqlalchemyjobstore', 'mongodbjobstore', 'redisjobstore', 'rethinkdbjobstore', 'zookeeperjobstore'], ids=['sqlalchemy', 'mongodb', 'redis', 'rethinkdb', 'zookeeper']) def persistent_jobstore(request): return request.getfixturevalue(request.param) @pytest.fixture def create_add_job(timezone, create_job): def create(jobstore, func=dummy_job, 
run_date=datetime(2999, 1, 1), id=None, paused=False, **kwargs): run_date = timezone.localize(run_date) job = create_job(func=func, trigger='date', trigger_args={'run_date': run_date}, id=id, **kwargs) job.next_run_time = None if paused else job.trigger.get_next_fire_time(None, run_date) if jobstore: jobstore.add_job(job) return job return create def test_add_instance_method_job(jobstore, create_add_job): instance = DummyClass() initial_job = create_add_job(jobstore, instance.dummy_method, kwargs={'a': 1, 'b': 2}) job = jobstore.lookup_job(initial_job.id) assert job.func(*job.args, **job.kwargs) == 3 def test_add_class_method_job(jobstore, create_add_job): initial_job = create_add_job(jobstore, DummyClass.dummy_classmethod, kwargs={'a': 1, 'b': 2}) job = jobstore.lookup_job(initial_job.id) assert job.func(*job.args, **job.kwargs) == 3 def test_lookup_job(jobstore, create_add_job): initial_job = create_add_job(jobstore) job = jobstore.lookup_job(initial_job.id) assert job == initial_job def test_lookup_nonexistent_job(jobstore): assert jobstore.lookup_job('foo') is None def test_get_all_jobs(jobstore, create_add_job): job1 = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) job2 = create_add_job(jobstore, dummy_job2, datetime(2013, 8, 14)) job3 = create_add_job(jobstore, dummy_job2, datetime(2013, 7, 11), paused=True) jobs = jobstore.get_all_jobs() assert jobs == [job2, job1, job3] def test_get_pending_jobs(jobstore, create_add_job, timezone): create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) job2 = create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) job3 = create_add_job(jobstore, dummy_job3, datetime(2013, 8, 14)) create_add_job(jobstore, dummy_job3, datetime(2013, 7, 11), paused=True) jobs = jobstore.get_due_jobs(timezone.localize(datetime(2014, 2, 27))) assert jobs == [job3, job2] jobs = jobstore.get_due_jobs(timezone.localize(datetime(2013, 8, 13))) assert jobs == [] def test_get_pending_jobs_subsecond_difference(jobstore, create_add_job, timezone): job1 = create_add_job(jobstore, dummy_job, datetime(2014, 7, 7, 0, 0, 0, 401)) job2 = create_add_job(jobstore, dummy_job2, datetime(2014, 7, 7, 0, 0, 0, 402)) job3 = create_add_job(jobstore, dummy_job3, datetime(2014, 7, 7, 0, 0, 0, 400)) jobs = jobstore.get_due_jobs(timezone.localize(datetime(2014, 7, 7, 1))) assert jobs == [job3, job1, job2] def test_get_next_run_time(jobstore, create_add_job, timezone): create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) create_add_job(jobstore, dummy_job3, datetime(2013, 8, 14)) create_add_job(jobstore, dummy_job3, datetime(2013, 7, 11), paused=True) assert jobstore.get_next_run_time() == timezone.localize(datetime(2013, 8, 14)) def test_add_job_conflicting_id(jobstore, create_add_job): create_add_job(jobstore, dummy_job, datetime(2016, 5, 3), id='blah') pytest.raises(ConflictingIdError, create_add_job, jobstore, dummy_job2, datetime(2014, 2, 26), id='blah') def test_update_job(jobstore, create_add_job, timezone): job1 = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) job2 = create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) replacement = create_add_job(None, dummy_job, datetime(2016, 5, 4), id=job1.id, max_instances=6) assert replacement.max_instances == 6 jobstore.update_job(replacement) jobs = jobstore.get_all_jobs() assert len(jobs) == 2 assert jobs[0].id == job2.id assert jobs[1].id == job1.id assert jobs[1].next_run_time == timezone.localize(datetime(2016, 5, 4)) assert 
jobs[1].max_instances == 6 @pytest.mark.parametrize('next_run_time', [datetime(2013, 8, 13), None], ids=['earlier', 'null']) def test_update_job_next_runtime(jobstore, create_add_job, next_run_time, timezone): job1 = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) job3 = create_add_job(jobstore, dummy_job3, datetime(2013, 8, 14)) job1.next_run_time = timezone.localize(next_run_time) if next_run_time else None jobstore.update_job(job1) if next_run_time: assert jobstore.get_next_run_time() == job1.next_run_time else: assert jobstore.get_next_run_time() == job3.next_run_time @pytest.mark.parametrize('next_run_time', [datetime(2013, 8, 13), None], ids=['earlier', 'null']) @pytest.mark.parametrize('index', [0, 1, 2], ids=['first', 'middle', 'last']) def test_update_job_clear_next_runtime(jobstore, create_add_job, next_run_time, timezone, index): """ Tests that update_job() maintains the proper ordering of the jobs, even when their next run times are initially the same. """ jobs = [create_add_job(jobstore, dummy_job, datetime(2014, 2, 26), 'job%d' % i) for i in range(3)] jobs[index].next_run_time = timezone.localize(next_run_time) if next_run_time else None jobstore.update_job(jobs[index]) due_date = timezone.localize(datetime(2014, 2, 27)) due_jobs = jobstore.get_due_jobs(due_date) assert len(due_jobs) == (3 if next_run_time else 2) due_job_ids = [job.id for job in due_jobs] if next_run_time: if index == 0: assert due_job_ids == ['job0', 'job1', 'job2'] elif index == 1: assert due_job_ids == ['job1', 'job0', 'job2'] else: assert due_job_ids == ['job2', 'job0', 'job1'] else: if index == 0: assert due_job_ids == ['job1', 'job2'] elif index == 1: assert due_job_ids == ['job0', 'job2'] else: assert due_job_ids == ['job0', 'job1'] def test_update_job_nonexistent_job(jobstore, create_add_job): job = create_add_job(None, dummy_job, datetime(2016, 5, 3)) pytest.raises(JobLookupError, jobstore.update_job, job) def test_one_job_fails_to_load(persistent_jobstore, create_add_job, monkeypatch, timezone): job1 = create_add_job(persistent_jobstore, dummy_job, datetime(2016, 5, 3)) job2 = create_add_job(persistent_jobstore, dummy_job2, datetime(2014, 2, 26)) create_add_job(persistent_jobstore, dummy_job3, datetime(2013, 8, 14)) # Make the dummy_job3 function disappear monkeypatch.delitem(globals(), 'dummy_job3') jobs = persistent_jobstore.get_all_jobs() assert jobs == [job2, job1] assert persistent_jobstore.get_next_run_time() == timezone.localize(datetime(2014, 2, 26)) def test_remove_job(jobstore, create_add_job): job1 = create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) job2 = create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) jobstore.remove_job(job1.id) jobs = jobstore.get_all_jobs() assert jobs == [job2] jobstore.remove_job(job2.id) jobs = jobstore.get_all_jobs() assert jobs == [] def test_remove_nonexistent_job(jobstore): pytest.raises(JobLookupError, jobstore.remove_job, 'blah') def test_remove_all_jobs(jobstore, create_add_job): create_add_job(jobstore, dummy_job, datetime(2016, 5, 3)) create_add_job(jobstore, dummy_job2, datetime(2014, 2, 26)) jobstore.remove_all_jobs() jobs = jobstore.get_all_jobs() assert jobs == [] def test_repr_memjobstore(memjobstore): assert repr(memjobstore) == '<MemoryJobStore>' def test_repr_sqlalchemyjobstore(sqlalchemyjobstore): assert repr(sqlalchemyjobstore).startswith('<SQLAlchemyJobStore (url=') def test_repr_zookeeperjobstore(zookeeperjobstore): class_sig = "") assert str(trigger) == "cron[year='2009/2', month='1/3', 
day='5-13']" start_date = timezone.localize(datetime(2008, 12, 1)) correct_next_date = timezone.localize(datetime(2009, 1, 5)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_trigger_2(self, timezone): trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone) start_date = timezone.localize(datetime(2009, 10, 14)) correct_next_date = timezone.localize(datetime(2011, 1, 5)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_trigger_3(self, timezone): trigger = CronTrigger(year='2009', month='feb-dec', hour='8-10', timezone=timezone) assert repr(trigger) == ("") start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 2, 1, 8)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_trigger_4(self, timezone): trigger = CronTrigger(year='2012', month='2', day='last', timezone=timezone) assert repr(trigger) == ("") start_date = timezone.localize(datetime(2012, 2, 1)) correct_next_date = timezone.localize(datetime(2012, 2, 29)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_start_end_times_string(self, timezone, monkeypatch): monkeypatch.setattr('apscheduler.triggers.cron.get_localzone', Mock(return_value=timezone)) trigger = CronTrigger(start_date='2016-11-05 05:06:53', end_date='2017-11-05 05:11:32') assert trigger.start_date == timezone.localize(datetime(2016, 11, 5, 5, 6, 53)) assert trigger.end_date == timezone.localize(datetime(2017, 11, 5, 5, 11, 32)) def test_cron_zero_value(self, timezone): trigger = CronTrigger(year=2009, month=2, hour=0, timezone=timezone) assert repr(trigger) == ("") def test_cron_year_list(self, timezone): trigger = CronTrigger(year='2009,2008', timezone=timezone) assert repr(trigger) == "" assert str(trigger) == "cron[year='2009,2008']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 1, 1)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_start_date(self, timezone): trigger = CronTrigger(year='2009', month='2', hour='8-10', start_date='2009-02-03 11:00:00', timezone=timezone) assert repr(trigger) == ("") assert str(trigger) == "cron[year='2009', month='2', hour='8-10']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 2, 4, 8)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_previous_fire_time_1(self, timezone): """Test for previous_fire_time arg in get_next_fire_time()""" trigger = CronTrigger(day="*", timezone=timezone) previous_fire_time = timezone.localize(datetime(2015, 11, 23)) now = timezone.localize(datetime(2015, 11, 26)) correct_next_date = timezone.localize(datetime(2015, 11, 24)) assert trigger.get_next_fire_time(previous_fire_time, now) == correct_next_date def test_previous_fire_time_2(self, timezone): trigger = CronTrigger(day="*", timezone=timezone) previous_fire_time = timezone.localize(datetime(2015, 11, 23)) now = timezone.localize(datetime(2015, 11, 22)) correct_next_date = timezone.localize(datetime(2015, 11, 22)) assert trigger.get_next_fire_time(previous_fire_time, now) == correct_next_date def test_previous_fire_time_3(self, timezone): trigger = CronTrigger(day="*", timezone=timezone) previous_fire_time = timezone.localize(datetime(2016, 4, 25)) now = timezone.localize(datetime(2016, 4, 25)) correct_next_date = timezone.localize(datetime(2016, 4, 
26)) assert trigger.get_next_fire_time(previous_fire_time, now) == correct_next_date def test_cron_weekday_overlap(self, timezone): trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='2-4', timezone=timezone) assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='6-10', day_of_week='2-4', timezone='Europe/Berlin')>") assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='2-4']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 1, 7)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_weekday_nomatch(self, timezone): trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='0,6', timezone=timezone) assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='6-10', day_of_week='0,6', timezone='Europe/Berlin')>") assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='0,6']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = None assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_weekday_positional(self, timezone): trigger = CronTrigger(year=2009, month=1, day='4th wed', timezone=timezone) assert repr(trigger) == ("<CronTrigger (year='2009', month='1', day='4th wed', timezone='Europe/Berlin')>") assert str(trigger) == "cron[year='2009', month='1', day='4th wed']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 1, 28)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_week_1(self, timezone): trigger = CronTrigger(year=2009, month=2, week=8, timezone=timezone) assert repr(trigger) == ("<CronTrigger (year='2009', month='2', week='8', timezone='Europe/Berlin')>") assert str(trigger) == "cron[year='2009', month='2', week='8']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 2, 16)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_week_2(self, timezone): trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone) assert repr(trigger) == ("<CronTrigger (year='2009', week='15', day_of_week='2', timezone='Europe/Berlin')>") assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']" start_date = timezone.localize(datetime(2009, 1, 1)) correct_next_date = timezone.localize(datetime(2009, 4, 8)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_extra_coverage(self, timezone): # This test has no value other than patching holes in test coverage trigger = CronTrigger(day='6,8', timezone=timezone) assert repr(trigger) == "<CronTrigger (day='6,8', timezone='Europe/Berlin')>" assert str(trigger) == "cron[day='6,8']" start_date = timezone.localize(datetime(2009, 12, 31)) correct_next_date = timezone.localize(datetime(2010, 1, 6)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_faulty_expr(self, timezone): pytest.raises(ValueError, CronTrigger, year='2009-fault', timezone=timezone) def test_cron_increment_weekday(self, timezone): """ Tests that incrementing the weekday field in the process of calculating the next matching date won't cause problems. 
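(2009-09-25 falls on a Friday and the start time is already past the 5-6 hour window, so the next fire time has to roll over to Saturday 05:00.)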
""" trigger = CronTrigger(hour='5-6', timezone=timezone) assert repr(trigger) == "" assert str(trigger) == "cron[hour='5-6']" start_date = timezone.localize(datetime(2009, 9, 25, 7)) correct_next_date = timezone.localize(datetime(2009, 9, 26, 5)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_cron_bad_kwarg(self, timezone): pytest.raises(TypeError, CronTrigger, second=0, third=1, timezone=timezone) def test_month_rollover(self, timezone): trigger = CronTrigger(timezone=timezone, day=30) now = timezone.localize(datetime(2016, 2, 1)) expected = timezone.localize(datetime(2016, 3, 30)) assert trigger.get_next_fire_time(None, now) == expected def test_timezone_from_start_date(self, timezone): """ Tests that the trigger takes the timezone from the start_date parameter if no timezone is supplied. """ start_date = timezone.localize(datetime(2014, 4, 13, 5, 30)) trigger = CronTrigger(year=2014, hour=4, start_date=start_date) assert trigger.timezone == start_date.tzinfo def test_end_date(self, timezone): end_date = timezone.localize(datetime(2014, 4, 13, 3)) trigger = CronTrigger(year=2014, hour=4, end_date=end_date) start_date = timezone.localize(datetime(2014, 4, 13, 2, 30)) assert trigger.get_next_fire_time(None, start_date - timedelta(1)) == \ start_date.replace(day=12, hour=4, minute=0) assert trigger.get_next_fire_time(None, start_date) is None def test_different_tz(self, timezone): alter_tz = pytz.FixedOffset(-600) trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone) assert repr(trigger) == ("") assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']" start_date = alter_tz.localize(datetime(2008, 12, 31, 22)) correct_next_date = timezone.localize(datetime(2009, 4, 8)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date @pytest.mark.parametrize('trigger_args, start_date, start_date_dst, correct_next_date', [ ({'hour': 8}, datetime(2013, 3, 9, 12), False, datetime(2013, 3, 10, 8)), ({'hour': 8}, datetime(2013, 11, 2, 12), True, datetime(2013, 11, 3, 8)), ({'minute': '*/30'}, datetime(2013, 3, 10, 1, 35), False, datetime(2013, 3, 10, 3)), ({'minute': '*/30'}, datetime(2013, 11, 3, 1, 35), True, datetime(2013, 11, 3, 1)) ], ids=['absolute_spring', 'absolute_autumn', 'interval_spring', 'interval_autumn']) def test_dst_change(self, trigger_args, start_date, start_date_dst, correct_next_date): """ Making sure that CronTrigger works correctly when crossing the DST switch threshold. Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which would test for equality in the UTC timezone. """ timezone = pytz.timezone('US/Eastern') trigger = CronTrigger(timezone=timezone, **trigger_args) start_date = timezone.localize(start_date, is_dst=start_date_dst) correct_next_date = timezone.localize(correct_next_date, is_dst=not start_date_dst) assert str(trigger.get_next_fire_time(None, start_date)) == str(correct_next_date) def test_timezone_change(self, timezone): """ Ensure that get_next_fire_time method returns datetimes in the timezone of the trigger and not in the timezone of the passed in start_date. 
""" est = pytz.FixedOffset(-300) cst = pytz.FixedOffset(-360) trigger = CronTrigger(hour=11, minute='*/5', timezone=est) start_date = cst.localize(datetime(2009, 9, 26, 10, 16)) correct_next_date = est.localize(datetime(2009, 9, 26, 11, 20)) assert str(trigger.get_next_fire_time(None, start_date)) == str(correct_next_date) def test_pickle(self, timezone): """Test that the trigger is pickleable.""" trigger = CronTrigger(year=2016, month='5-6', day='20-28', hour=7, minute=25, second='*', timezone=timezone) data = pickle.dumps(trigger, 2) trigger2 = pickle.loads(data) for attr in CronTrigger.__slots__: assert getattr(trigger2, attr) == getattr(trigger, attr) def test_jitter_produces_differrent_valid_results(self, timezone): trigger = CronTrigger(minute='*', jitter=5) now = timezone.localize(datetime(2017, 11, 12, 6, 55, 30)) results = set() for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, now) results.add(next_fire_time) assert timedelta(seconds=25) <= (next_fire_time - now) <= timedelta(seconds=35) assert 1 < len(results) def test_jitter_with_timezone(self, timezone): est = pytz.FixedOffset(-300) cst = pytz.FixedOffset(-360) trigger = CronTrigger(hour=11, minute='*/5', timezone=est, jitter=5) start_date = cst.localize(datetime(2009, 9, 26, 10, 16)) correct_next_date = est.localize(datetime(2009, 9, 26, 11, 20)) for _ in range(0, 100): assert abs(trigger.get_next_fire_time(None, start_date) - correct_next_date) <= timedelta(seconds=5) @pytest.mark.parametrize('trigger_args, start_date, start_date_dst, correct_next_date', [ ({'hour': 8}, datetime(2013, 3, 9, 12), False, datetime(2013, 3, 10, 8)), ({'hour': 8}, datetime(2013, 11, 2, 12), True, datetime(2013, 11, 3, 8)), ({'minute': '*/30'}, datetime(2013, 3, 10, 1, 35), False, datetime(2013, 3, 10, 3)), ({'minute': '*/30'}, datetime(2013, 11, 3, 1, 35), True, datetime(2013, 11, 3, 1)) ], ids=['absolute_spring', 'absolute_autumn', 'interval_spring', 'interval_autumn']) def test_jitter_dst_change(self, trigger_args, start_date, start_date_dst, correct_next_date): timezone = pytz.timezone('US/Eastern') trigger = CronTrigger(timezone=timezone, jitter=5, **trigger_args) start_date = timezone.localize(start_date, is_dst=start_date_dst) correct_next_date = timezone.localize(correct_next_date, is_dst=not start_date_dst) for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, start_date) assert abs(next_fire_time - correct_next_date) <= timedelta(seconds=5) def test_jitter_with_end_date(self, timezone): now = timezone.localize(datetime(2017, 11, 12, 6, 55, 30)) end_date = timezone.localize(datetime(2017, 11, 12, 6, 56, 0)) trigger = CronTrigger(minute='*', jitter=5, end_date=end_date) for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, now) assert next_fire_time is None or next_fire_time <= end_date @pytest.mark.parametrize('values, expected', [ (dict(day='*/31'), r"Error validating expression '\*/31': the step value \(31\) is higher " r"than the total range of the expression \(30\)"), (dict(day='4-6/3'), r"Error validating expression '4-6/3': the step value \(3\) is higher " r"than the total range of the expression \(2\)"), (dict(hour='0-24'), r"Error validating expression '0-24': the last value \(24\) is higher " r"than the maximum value \(23\)"), (dict(day='0-3'), r"Error validating expression '0-3': the first value \(0\) is lower " r"than the minimum value \(1\)") ], ids=['too_large_step_all', 'too_large_step_range', 'too_high_last', 'too_low_first']) def test_invalid_ranges(self, 

    @pytest.mark.parametrize('values, expected', [
        (dict(day='*/31'),
         r"Error validating expression '\*/31': the step value \(31\) is higher "
         r"than the total range of the expression \(30\)"),
        (dict(day='4-6/3'),
         r"Error validating expression '4-6/3': the step value \(3\) is higher "
         r"than the total range of the expression \(2\)"),
        (dict(hour='0-24'),
         r"Error validating expression '0-24': the last value \(24\) is higher "
         r"than the maximum value \(23\)"),
        (dict(day='0-3'),
         r"Error validating expression '0-3': the first value \(0\) is lower "
         r"than the minimum value \(1\)")
    ], ids=['too_large_step_all', 'too_large_step_range', 'too_high_last', 'too_low_first'])
    def test_invalid_ranges(self, values, expected):
        pytest.raises(ValueError, CronTrigger, **values).match(expected)

    @pytest.mark.parametrize('expr, expected_repr', [
        ('* * * * *',
         "<CronTrigger (month='*', day='*', day_of_week='*', hour='*', minute='*', "
         "timezone='Europe/Berlin')>"),
        ('0-14 * 14-28 jul fri',
         "<CronTrigger (month='jul', day='14-28', day_of_week='fri', hour='*', "
         "minute='0-14', timezone='Europe/Berlin')>"),
        (' 0-14   * 14-28   jul       fri',
         "<CronTrigger (month='jul', day='14-28', day_of_week='fri', hour='*', "
         "minute='0-14', timezone='Europe/Berlin')>")
    ], ids=['always', 'assorted', 'multiple_spaces_in_format'])
    def test_from_crontab(self, expr, expected_repr, timezone):
        trigger = CronTrigger.from_crontab(expr, timezone)
        assert repr(trigger) == expected_repr


class TestDateTrigger(object):
    @pytest.mark.parametrize('run_date,alter_tz,previous,now,expected', [
        (datetime(2009, 7, 6), None, None, datetime(2008, 5, 4), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, None, datetime(2009, 7, 6), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
        ('2009-7-6', None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, datetime(2009, 7, 6), datetime(2009, 9, 2), None),
        (datetime(2009, 7, 5, 22), pytz.FixedOffset(-60), datetime(2009, 7, 6),
         datetime(2009, 7, 6), None),
        (None, pytz.FixedOffset(-120), None, datetime(2011, 4, 3, 18, 40),
         datetime(2011, 4, 3, 18, 40))
    ], ids=['earlier', 'exact', 'later', 'as text', 'previously fired', 'alternate timezone',
            'current_time'])
    def test_get_next_fire_time(self, run_date, alter_tz, previous, now, expected, timezone,
                                freeze_time):
        trigger = DateTrigger(run_date, alter_tz or timezone)
        previous = timezone.localize(previous) if previous else None
        now = timezone.localize(now)
        expected = timezone.localize(expected) if expected else None
        assert trigger.get_next_fire_time(previous, now) == expected
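
    # Taken together, the cases above pin down DateTrigger's one-shot contract: the
    # configured run_date is returned whether "now" is before, at, or past it; a
    # string run_date is parsed and localized; a previous fire time equal to
    # run_date means the trigger is spent (None); and with no run_date at all, the
    # current (frozen) time is used.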
""" eastern = pytz.timezone('US/Eastern') run_date = eastern.localize(datetime(2013, 10, 3, 1, 5), is_dst=is_dst) fire_date = eastern.normalize(run_date + timedelta(minutes=55)) trigger = DateTrigger(run_date=fire_date, timezone=eastern) assert str(trigger.get_next_fire_time(None, fire_date)) == str(fire_date) def test_repr(self, timezone): trigger = DateTrigger(datetime(2009, 7, 6), timezone) assert repr(trigger) == "" def test_str(self, timezone): trigger = DateTrigger(datetime(2009, 7, 6), timezone) assert str(trigger) == "date[2009-07-06 00:00:00 CEST]" def test_pickle(self, timezone): """Test that the trigger is pickleable.""" trigger = DateTrigger(date(2016, 4, 3), timezone=timezone) data = pickle.dumps(trigger, 2) trigger2 = pickle.loads(data) assert trigger2.run_date == trigger.run_date class TestIntervalTrigger(object): @pytest.fixture() def trigger(self, timezone): return IntervalTrigger(seconds=1, start_date=datetime(2009, 8, 4, second=2), timezone=timezone) def test_invalid_interval(self, timezone): pytest.raises(TypeError, IntervalTrigger, '1-6', timezone=timezone) def test_start_end_times_string(self, timezone, monkeypatch): monkeypatch.setattr('apscheduler.triggers.interval.get_localzone', Mock(return_value=timezone)) trigger = IntervalTrigger(start_date='2016-11-05 05:06:53', end_date='2017-11-05 05:11:32') assert trigger.start_date == timezone.localize(datetime(2016, 11, 5, 5, 6, 53)) assert trigger.end_date == timezone.localize(datetime(2017, 11, 5, 5, 11, 32)) def test_before(self, trigger, timezone): """Tests that if "start_date" is later than "now", it will return start_date.""" now = trigger.start_date - timedelta(seconds=2) assert trigger.get_next_fire_time(None, now) == trigger.start_date def test_within(self, trigger, timezone): """ Tests that if "now" is between "start_date" and the next interval, it will return the next interval. """ now = trigger.start_date + timedelta(microseconds=1000) assert trigger.get_next_fire_time(None, now) == trigger.start_date + trigger.interval def test_no_start_date(self, timezone): trigger = IntervalTrigger(seconds=2, timezone=timezone) now = datetime.now(timezone) assert (trigger.get_next_fire_time(None, now) - now) <= timedelta(seconds=2) def test_different_tz(self, trigger, timezone): alter_tz = pytz.FixedOffset(-60) start_date = alter_tz.localize(datetime(2009, 8, 3, 22, second=2, microsecond=1000)) correct_next_date = timezone.localize(datetime(2009, 8, 4, 1, second=3)) assert trigger.get_next_fire_time(None, start_date) == correct_next_date def test_end_date(self, timezone): """Tests that the interval trigger won't return any datetimes past the set end time.""" start_date = timezone.localize(datetime(2014, 5, 26)) trigger = IntervalTrigger(minutes=5, start_date=start_date, end_date=datetime(2014, 5, 26, 0, 7), timezone=timezone) assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=2)) == \ start_date.replace(minute=5) assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=6)) is None def test_dst_change(self): """ Making sure that IntervalTrigger works during the ambiguous "fall-back" DST period. Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which would test for equality in the UTC timezone. 
""" eastern = pytz.timezone('US/Eastern') start_date = datetime(2013, 3, 1) # Start within EDT trigger = IntervalTrigger(hours=1, start_date=start_date, timezone=eastern) datetime_edt = eastern.localize(datetime(2013, 3, 10, 1, 5), is_dst=False) correct_next_date = eastern.localize(datetime(2013, 3, 10, 3), is_dst=True) assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date) datetime_est = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=True) correct_next_date = eastern.localize(datetime(2013, 11, 3, 1), is_dst=False) assert str(trigger.get_next_fire_time(None, datetime_est)) == str(correct_next_date) def test_space_in_expr(self, timezone): trigger = CronTrigger(day='1-2, 4-7', timezone=timezone) assert repr(trigger) == "" def test_repr(self, trigger): if sys.version_info[:2] < (3, 7): timedelta_args = '0, 1' else: timedelta_args = 'seconds=1' assert repr(trigger) == ("".format(timedelta_args)) def test_str(self, trigger): assert str(trigger) == "interval[0:00:01]" def test_pickle(self, timezone): """Test that the trigger is pickleable.""" trigger = IntervalTrigger(weeks=2, days=6, minutes=13, seconds=2, start_date=date(2016, 4, 3), timezone=timezone, jitter=12) data = pickle.dumps(trigger, 2) trigger2 = pickle.loads(data) for attr in IntervalTrigger.__slots__: assert getattr(trigger2, attr) == getattr(trigger, attr) def test_jitter_produces_different_valid_results(self, timezone): trigger = IntervalTrigger(seconds=5, timezone=timezone, jitter=3) now = datetime.now(timezone) results = set() for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, now) results.add(next_fire_time) assert timedelta(seconds=2) <= (next_fire_time - now) <= timedelta(seconds=8) assert 1 < len(results) @pytest.mark.parametrize('trigger_args, start_date, start_date_dst, correct_next_date', [ ({'hours': 1}, datetime(2013, 3, 10, 1, 35), False, datetime(2013, 3, 10, 3, 35)), ({'hours': 1}, datetime(2013, 11, 3, 1, 35), True, datetime(2013, 11, 3, 1, 35)) ], ids=['interval_spring', 'interval_autumn']) def test_jitter_dst_change(self, trigger_args, start_date, start_date_dst, correct_next_date): timezone = pytz.timezone('US/Eastern') epsilon = timedelta(seconds=1) start_date = timezone.localize(start_date, is_dst=start_date_dst) trigger = IntervalTrigger(timezone=timezone, start_date=start_date, jitter=5, **trigger_args) correct_next_date = timezone.localize(correct_next_date, is_dst=not start_date_dst) for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, start_date + epsilon) assert abs(next_fire_time - correct_next_date) <= timedelta(seconds=5) def test_jitter_with_end_date(self, timezone): now = timezone.localize(datetime(2017, 11, 12, 6, 55, 58)) end_date = timezone.localize(datetime(2017, 11, 12, 6, 56, 0)) trigger = IntervalTrigger(seconds=5, jitter=5, end_date=end_date) for _ in range(0, 100): next_fire_time = trigger.get_next_fire_time(None, now) assert next_fire_time is None or next_fire_time <= end_date class TestAndTrigger(object): @pytest.fixture def trigger(self, timezone): return AndTrigger([ CronTrigger(month='5-8', day='6-15', end_date=timezone.localize(datetime(2017, 8, 10))), CronTrigger(month='6-9', day='*/3', end_date=timezone.localize(datetime(2017, 9, 7))) ]) @pytest.mark.parametrize('start_time, expected', [ (datetime(2017, 8, 6), datetime(2017, 8, 7)), (datetime(2017, 8, 10, 1), None) ], ids=['firstmatch', 'end']) def test_next_fire_time(self, trigger, timezone, start_time, expected): expected = timezone.localize(expected) 


class TestAndTrigger(object):
    @pytest.fixture
    def trigger(self, timezone):
        return AndTrigger([
            CronTrigger(month='5-8', day='6-15',
                        end_date=timezone.localize(datetime(2017, 8, 10))),
            CronTrigger(month='6-9', day='*/3',
                        end_date=timezone.localize(datetime(2017, 9, 7)))
        ])

    @pytest.mark.parametrize('start_time, expected', [
        (datetime(2017, 8, 6), datetime(2017, 8, 7)),
        (datetime(2017, 8, 10, 1), None)
    ], ids=['firstmatch', 'end'])
    def test_next_fire_time(self, trigger, timezone, start_time, expected):
        expected = timezone.localize(expected) if expected else None
        assert trigger.get_next_fire_time(None, timezone.localize(start_time)) == expected

    def test_jitter(self, trigger, timezone):
        trigger.jitter = 5
        start_time = timezone.localize(datetime(2017, 8, 6))
        expected = timezone.localize(datetime(2017, 8, 7))
        for _ in range(100):
            next_fire_time = trigger.get_next_fire_time(None, start_time)
            assert abs(expected - next_fire_time) <= timedelta(seconds=5)

    @pytest.mark.parametrize('jitter', [None, 5], ids=['nojitter', 'jitter'])
    def test_repr(self, trigger, jitter):
        trigger.jitter = jitter
        jitter_part = ', jitter={}'.format(jitter) if jitter else ''
        assert repr(trigger) == (
            "<AndTrigger([<CronTrigger (month='5-8', day='6-15', "
            "end_date='2017-08-10 00:00:00 CEST', timezone='Europe/Berlin')>, "
            "<CronTrigger (month='6-9', day='*/3', end_date='2017-09-07 00:00:00 CEST', "
            "timezone='Europe/Berlin')>]{})>".format(jitter_part))

    def test_str(self, trigger):
        assert str(trigger) == "and[cron[month='5-8', day='6-15'], cron[month='6-9', day='*/3']]"

    @pytest.mark.parametrize('jitter', [None, 5], ids=['nojitter', 'jitter'])
    def test_pickle(self, trigger, jitter):
        """Test that the trigger is pickleable."""
        trigger.jitter = jitter
        data = pickle.dumps(trigger, 2)
        trigger2 = pickle.loads(data)

        for attr in BaseCombiningTrigger.__slots__:
            assert repr(getattr(trigger2, attr)) == repr(getattr(trigger, attr))


class TestOrTrigger(object):
    @pytest.fixture
    def trigger(self, timezone):
        return OrTrigger([
            CronTrigger(month='5-8', day='6-15',
                        end_date=timezone.localize(datetime(2017, 8, 10))),
            CronTrigger(month='6-9', day='*/3',
                        end_date=timezone.localize(datetime(2017, 9, 7)))
        ])

    @pytest.mark.parametrize('start_time, expected', [
        (datetime(2017, 8, 6), datetime(2017, 8, 6)),
        (datetime(2017, 9, 7, 1), None)
    ], ids=['earliest', 'end'])
    def test_next_fire_time(self, trigger, timezone, start_time, expected):
        expected = timezone.localize(expected) if expected else None
        assert trigger.get_next_fire_time(None, timezone.localize(start_time)) == expected

    def test_jitter(self, trigger, timezone):
        trigger.jitter = 5
        start_time = expected = timezone.localize(datetime(2017, 8, 6))
        for _ in range(100):
            next_fire_time = trigger.get_next_fire_time(None, start_time)
            assert abs(expected - next_fire_time) <= timedelta(seconds=5)

    @pytest.mark.parametrize('jitter', [None, 5], ids=['nojitter', 'jitter'])
    def test_repr(self, trigger, jitter):
        trigger.jitter = jitter
        jitter_part = ', jitter={}'.format(jitter) if jitter else ''
        assert repr(trigger) == (
            "<OrTrigger([<CronTrigger (month='5-8', day='6-15', "
            "end_date='2017-08-10 00:00:00 CEST', timezone='Europe/Berlin')>, "
            "<CronTrigger (month='6-9', day='*/3', end_date='2017-09-07 00:00:00 CEST', "
            "timezone='Europe/Berlin')>]{})>".format(jitter_part))

    def test_str(self, trigger):
        assert str(trigger) == "or[cron[month='5-8', day='6-15'], cron[month='6-9', day='*/3']]"

    @pytest.mark.parametrize('jitter', [None, 5], ids=['nojitter', 'jitter'])
    def test_pickle(self, trigger, jitter):
        """Test that the trigger is pickleable."""
        trigger.jitter = jitter
        data = pickle.dumps(trigger, 2)
        trigger2 = pickle.loads(data)

        for attr in BaseCombiningTrigger.__slots__:
            assert repr(getattr(trigger2, attr)) == repr(getattr(trigger, attr))
apscheduler-3.6.3/tests/test_util.py000066400000000000000000000320171356022153700175610ustar00rootroot00000000000000
# coding: utf-8
import platform
from datetime import date, datetime, timedelta, tzinfo
from functools import partial
from types import ModuleType

import pytest
import pytz
import six
import sys

from apscheduler.job import Job
from apscheduler.util import (
    asint, asbool, astimezone, convert_to_datetime, datetime_to_utc_timestamp,
    utc_timestamp_to_datetime, timedelta_seconds, datetime_ceil, get_callable_name, obj_to_ref,
    ref_to_obj, maybe_ref, check_callable_args, datetime_repr, repr_escape)
from tests.conftest import minpython, maxpython

try:
    from unittest.mock import Mock
except ImportError:
    from mock import Mock


class DummyClass(object):
    def meth(self):
        pass

    @staticmethod
    def staticmeth():
        pass

    @classmethod
    def classmeth(cls):
        pass

    def __call__(self):
        pass

    class InnerDummyClass(object):
        @classmethod
        def innerclassmeth(cls):
            pass


class InheritedDummyClass(Job):
    pass


class TestAsint(object):
    @pytest.mark.parametrize('value', ['5s', 'shplse'], ids=['digit first', 'text'])
    def test_invalid_value(self, value):
        pytest.raises(ValueError, asint, value)

    def test_number(self):
        assert asint('539') == 539

    def test_none(self):
        assert asint(None) is None


class TestAsbool(object):
    @pytest.mark.parametrize(
        'value', [' True', 'true ', 'Yes', ' yes ', '1 ', True],
        ids=['capital true', 'lowercase true', 'capital yes', 'lowercase yes', 'one', 'True'])
    def test_true(self, value):
        assert asbool(value) is True

    @pytest.mark.parametrize(
        'value', [' False', 'false ', 'No', ' no ', '0 ', False],
        ids=['capital', 'lowercase false', 'capital no', 'lowercase no', 'zero', 'False'])
    def test_false(self, value):
        assert asbool(value) is False

    def test_bad_value(self):
        pytest.raises(ValueError, asbool, 'yep')


class TestAstimezone(object):
    def test_str(self):
        value = astimezone('Europe/Helsinki')
        assert isinstance(value, tzinfo)

    def test_tz(self):
        tz = pytz.timezone('Europe/Helsinki')
        value = astimezone(tz)
        assert tz is value

    def test_none(self):
        assert astimezone(None) is None

    def test_bad_timezone_type(self):
        exc = pytest.raises(TypeError, astimezone, tzinfo())
        assert 'Only timezones from the pytz library are supported' in str(exc.value)

    def test_bad_local_timezone(self):
        zone = Mock(tzinfo, localize=None, normalize=None, zone='local')
        exc = pytest.raises(ValueError, astimezone, zone)
        assert 'Unable to determine the name of the local timezone' in str(exc.value)

    def test_bad_value(self):
        exc = pytest.raises(TypeError, astimezone, 4)
        assert 'Expected tzinfo, got int instead' in str(exc.value)


class TestConvertToDatetime(object):
    @pytest.mark.parametrize('input,expected', [
        (None, None),
        (date(2009, 8, 1), datetime(2009, 8, 1)),
        (datetime(2009, 8, 1, 5, 6, 12), datetime(2009, 8, 1, 5, 6, 12)),
        ('2009-8-1', datetime(2009, 8, 1)),
        ('2009-8-1 5:16:12', datetime(2009, 8, 1, 5, 16, 12)),
        ('2009-8-1T5:16:12Z', datetime(2009, 8, 1, 5, 16, 12, tzinfo=pytz.utc)),
        ('2009-8-1T5:16:12+02:30',
         pytz.FixedOffset(150).localize(datetime(2009, 8, 1, 5, 16, 12))),
        ('2009-8-1T5:16:12-05:30',
         pytz.FixedOffset(-330).localize(datetime(2009, 8, 1, 5, 16, 12))),
        (pytz.FixedOffset(-60).localize(datetime(2009, 8, 1)),
         pytz.FixedOffset(-60).localize(datetime(2009, 8, 1)))
    ], ids=['None', 'date', 'datetime', 'date as text', 'datetime as text', 'utc', 'tzoffset',
            'negtzoffset', 'existing tzinfo'])
    def test_date(self, timezone, input, expected):
        returned = convert_to_datetime(input, timezone, None)
        if expected is not None:
            assert isinstance(returned, datetime)
            expected = timezone.localize(expected) if not expected.tzinfo else expected

        assert returned == expected

    def test_invalid_input_type(self, timezone):
        exc = pytest.raises(TypeError, convert_to_datetime, 92123, timezone, 'foo')
        assert str(exc.value) == 'Unsupported type for foo: int'

    def test_invalid_input_value(self, timezone):
        exc = pytest.raises(ValueError, convert_to_datetime, '19700-12-1', timezone, None)
        assert str(exc.value) == 'Invalid date string'

    def test_missing_timezone(self):
        exc = pytest.raises(ValueError, convert_to_datetime, '2009-8-1', None, 'argname')
        assert str(exc.value) == ('The "tz" argument must be specified if argname has no timezone '
                                  'information')
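
    # As the cases above show, convert_to_datetime() accepts None, date, datetime and
    # several string layouts; naive results are localized into the given timezone,
    # while inputs that already carry tzinfo (or a Z/offset suffix) keep theirs.
    # The tz argument itself may also be given as a timezone name, which the next
    # test exercises.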

    def test_text_timezone(self):
        returned = convert_to_datetime('2009-8-1', 'UTC', None)
        assert returned == datetime(2009, 8, 1, tzinfo=pytz.utc)

    def test_bad_timezone(self):
        exc = pytest.raises(TypeError, convert_to_datetime, '2009-8-1', tzinfo(), None)
        assert str(exc.value) == ('Only pytz timezones are supported (need the localize() and '
                                  'normalize() methods)')


def test_datetime_to_utc_timestamp(timezone):
    dt = timezone.localize(datetime(2014, 3, 12, 5, 40, 13, 254012))
    timestamp = datetime_to_utc_timestamp(dt)
    dt2 = utc_timestamp_to_datetime(timestamp)
    assert dt2 == dt


def test_timedelta_seconds():
    delta = timedelta(minutes=2, seconds=30)
    seconds = timedelta_seconds(delta)
    assert seconds == 150


@pytest.mark.parametrize('input,expected', [
    (datetime(2009, 4, 7, 2, 10, 16, 4000), datetime(2009, 4, 7, 2, 10, 17)),
    (datetime(2009, 4, 7, 2, 10, 16), datetime(2009, 4, 7, 2, 10, 16))
], ids=['milliseconds', 'exact'])
def test_datetime_ceil(input, expected):
    assert datetime_ceil(input) == expected


@pytest.mark.parametrize('input,expected', [
    (None, 'None'),
    (pytz.timezone('Europe/Helsinki').localize(datetime(2014, 5, 30, 7, 12, 20)),
     '2014-05-30 07:12:20 EEST')
], ids=['None', 'datetime+tzinfo'])
def test_datetime_repr(input, expected):
    assert datetime_repr(input) == expected


class TestGetCallableName(object):
    @pytest.mark.parametrize('input,expected', [
        (asint, 'asint'),
        (DummyClass.staticmeth, 'DummyClass.staticmeth'
         if hasattr(DummyClass, '__qualname__') else 'staticmeth'),
        (DummyClass.classmeth, 'DummyClass.classmeth'),
        (DummyClass.meth, 'meth' if sys.version_info[:2] == (3, 2) else 'DummyClass.meth'),
        (DummyClass().meth, 'DummyClass.meth'),
        (DummyClass, 'DummyClass'),
        (DummyClass(), 'DummyClass')
    ], ids=['function', 'static method', 'class method', 'unbounded method', 'bounded method',
            'class', 'instance'])
    def test_inputs(self, input, expected):
        assert get_callable_name(input) == expected

    def test_bad_input(self):
        pytest.raises(TypeError, get_callable_name, object())


class TestObjToRef(object):
    @pytest.mark.parametrize('obj, error', [
        (partial(DummyClass.meth), 'Cannot create a reference to a partial()'),
        (lambda: None, 'Cannot create a reference to a lambda')
    ], ids=['partial', 'lambda'])
    def test_errors(self, obj, error):
        exc = pytest.raises(ValueError, obj_to_ref, obj)
        assert str(exc.value) == error

    @pytest.mark.skipif(sys.version_info[:2] < (3, 3),
                        reason='Requires __qualname__ (Python 3.3+)')
    def test_nested_function_error(self):
        def nested():
            pass

        exc = pytest.raises(ValueError, obj_to_ref, nested)
        assert str(exc.value) == 'Cannot create a reference to a nested function'

    @pytest.mark.parametrize('input,expected', [
        (DummyClass.meth, 'tests.test_util:DummyClass.meth'),
        (DummyClass.classmeth, 'tests.test_util:DummyClass.classmeth'),
        pytest.param(
            DummyClass.InnerDummyClass.innerclassmeth,
            'tests.test_util:DummyClass.InnerDummyClass.innerclassmeth',
            marks=[pytest.mark.skipif(sys.version_info < (3, 3),
                                      reason="Requires __qualname__ (Python 3.3+)")]
        ),
        pytest.param(
            DummyClass.staticmeth,
            'tests.test_util:DummyClass.staticmeth',
            marks=[pytest.mark.skipif(sys.version_info < (3, 3),
                                      reason="Requires __qualname__ (Python 3.3+)")]
        ),
        (timedelta, 'datetime:timedelta'),
    ], ids=['class method', 'inner class method', 'static method', 'inherited class method',
            'timedelta'])
    def test_valid_refs(self, input, expected):
        assert obj_to_ref(input) == expected


class TestRefToObj(object):
    def test_valid_ref(self):
        from logging.handlers import RotatingFileHandler
        assert ref_to_obj('logging.handlers:RotatingFileHandler') is RotatingFileHandler
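
    # obj_to_ref() and ref_to_obj() are inverses built around the 'module:qualname'
    # notation used when serializing job callables, so a round trip such as
    # ref_to_obj(obj_to_ref(timedelta)) is timedelta holds for any importable,
    # non-nested callable (cf. test_valid_refs and test_maybe_ref).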

    def test_complex_path(self):
        pkg1 = ModuleType('pkg1')
        pkg1.pkg2 = 'blah'
        pkg2 = ModuleType('pkg1.pkg2')
        pkg2.varname = 'test'
        sys.modules['pkg1'] = pkg1
        sys.modules['pkg1.pkg2'] = pkg2
        assert ref_to_obj('pkg1.pkg2:varname') == 'test'

    @pytest.mark.parametrize('input,error', [
        (object(), TypeError),
        ('module', ValueError),
        ('module:blah', LookupError)
    ], ids=['raw object', 'module', 'module attribute'])
    def test_lookup_error(self, input, error):
        pytest.raises(error, ref_to_obj, input)


@pytest.mark.parametrize('input,expected', [
    ('datetime:timedelta', timedelta),
    (timedelta, timedelta)
], ids=['textref', 'direct'])
def test_maybe_ref(input, expected):
    assert maybe_ref(input) == expected


@pytest.mark.parametrize('input,expected', [
    (b'T\xc3\xa9st'.decode('utf-8'), 'T\\xe9st' if six.PY2 else 'Tést'),
    (1, 1)
], ids=['string', 'int'])
@maxpython(3)
def test_repr_escape_py2(input, expected):
    assert repr_escape(input) == expected


class TestCheckCallableArgs(object):
    def test_invalid_callable_args(self):
        """
        Tests that attempting to create a job with an invalid number of arguments raises an
        exception.

        """
        exc = pytest.raises(ValueError, check_callable_args, lambda x: None, [1, 2], {})
        assert str(exc.value) == (
            'The list of positional arguments is longer than the target callable can handle '
            '(allowed: 1, given in args: 2)')

    def test_invalid_callable_kwargs(self):
        """
        Tests that attempting to schedule a job with unmatched keyword arguments raises an
        exception.

        """
        exc = pytest.raises(ValueError, check_callable_args, lambda x: None, [],
                            {'x': 0, 'y': 1})
        assert str(exc.value) == ('The target callable does not accept the following keyword '
                                  'arguments: y')

    def test_missing_callable_args(self):
        """Tests that attempting to schedule a job with missing arguments raises an exception."""
        exc = pytest.raises(ValueError, check_callable_args, lambda x, y, z: None, [1], {'y': 0})
        assert str(exc.value) == 'The following arguments have not been supplied: z'

    def test_default_args(self):
        """Tests that default values for arguments are properly taken into account."""
        exc = pytest.raises(ValueError, check_callable_args, lambda x, y, z=1: None, [1], {})
        assert str(exc.value) == 'The following arguments have not been supplied: y'

    def test_conflicting_callable_args(self):
        """
        Tests that attempting to schedule a job where the combination of args and kwargs are in
        conflict raises an exception.

        """
        exc = pytest.raises(ValueError, check_callable_args, lambda x, y: None, [1, 2], {'y': 1})
        assert str(exc.value) == 'The following arguments are supplied in both args and kwargs: y'

    def test_signature_positional_only(self):
        """Tests that a function where signature() fails is accepted."""
        check_callable_args(object().__setattr__, ('blah', 1), {})

    @minpython(3, 4)
    @pytest.mark.skipif(platform.python_implementation() == 'PyPy',
                        reason='PyPy does not expose signatures of builtins')
    def test_positional_only_args(self):
        """
        Tests that an attempt to use keyword arguments for positional-only arguments raises an
        exception.

        """
        exc = pytest.raises(ValueError, check_callable_args, object.__setattr__, ['blah'],
                            {'value': 1})
        assert str(exc.value) == ('The following arguments cannot be given as keyword arguments: '
                                  'value')
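
    # The next test builds its lambda with eval() because "lambda x, *, y, z=1: None"
    # uses keyword-only argument syntax that would be a SyntaxError under Python 2;
    # eval() defers parsing to runtime, where @minpython(3) guarantees Python 3.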
""" func = eval("lambda x, *, y, z=1: None") exc = pytest.raises(ValueError, check_callable_args, func, [1], {}) assert str(exc.value) == ('The following keyword-only arguments have not been supplied in ' 'kwargs: y') apscheduler-3.6.3/tests/test_util_py35.py000066400000000000000000000010051356022153700204320ustar00rootroot00000000000000from functools import partial from apscheduler.util import iscoroutinefunction_partial class TestIsCoroutineFunctionPartial: @staticmethod def not_a_coro(x): pass @staticmethod async def a_coro(x): pass def test_non_coro(self): assert not iscoroutinefunction_partial(self.not_a_coro) def test_coro(self): assert iscoroutinefunction_partial(self.a_coro) def test_coro_partial(self): assert iscoroutinefunction_partial(partial(self.a_coro, 1)) apscheduler-3.6.3/tox.ini000066400000000000000000000006501356022153700153420ustar00rootroot00000000000000[tox] envlist = py27, py34, py35, py36, py37, pypy, pypy3, flake8 skip_missing_interpreters = true [testenv] commands = pytest {posargs} extras = testing asyncio gevent mongodb redis rethinkdb sqlalchemy tornado twisted zookeeper deps = {py35,py36,py37}: PyQt5 [testenv:py34] deps = twisted < 19.7 [testenv:flake8] deps = flake8 commands = flake8 apscheduler tests skip_install = true