First commit of the RSS project
commit 27c9515d29
1568 changed files with 252311 additions and 0 deletions
@@ -0,0 +1,12 @@
class SchedulerAlreadyRunningError(Exception):
    """Raised when attempting to start or configure the scheduler when it's already running."""

    def __str__(self):
        return "Scheduler is already running"


class SchedulerNotRunningError(Exception):
    """Raised when attempting to shutdown the scheduler when it's not running."""

    def __str__(self):
        return "Scheduler is not running"
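For context, a minimal sketch (not part of the committed files) of when these two exceptions surface, assuming the standard add_job/start/shutdown API from the vendored APScheduler base class; tick is a placeholder job:

from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError
from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print("tick")


scheduler = BackgroundScheduler()
scheduler.add_job(tick, "interval", seconds=5)
scheduler.start()

try:
    scheduler.start()                 # starting an already running scheduler
except SchedulerAlreadyRunningError as exc:
    print(exc)                        # "Scheduler is already running"

scheduler.shutdown()

try:
    scheduler.shutdown()              # shutting down a stopped scheduler
except SchedulerNotRunningError as exc:
    print(exc)                        # "Scheduler is not running"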
9 binary files not shown.
@@ -0,0 +1,67 @@
import asyncio
from functools import partial, wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref


def run_in_event_loop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        wrapped = partial(func, self, *args, **kwargs)
        self._eventloop.call_soon_threadsafe(wrapped)

    return wrapper


class AsyncIOScheduler(BaseScheduler):
    """
    A scheduler that runs on an asyncio (:pep:`3156`) event loop.

    The default executor can run jobs based on native coroutines (``async def``).

    Extra options:

    ============== =============================================================
    ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
    ============== =============================================================
    """

    _eventloop = None
    _timeout = None

    def start(self, paused=False):
        if not self._eventloop:
            self._eventloop = asyncio.get_running_loop()

        super().start(paused)

    @run_in_event_loop
    def shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._eventloop = maybe_ref(config.pop("event_loop", None))
        super()._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._timeout:
            self._timeout.cancel()
            del self._timeout

    @run_in_event_loop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.asyncio import AsyncIOExecutor

        return AsyncIOExecutor()
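A minimal usage sketch (not part of the commit) for AsyncIOScheduler, assuming the standard add_job API; since start() resolves the loop with asyncio.get_running_loop(), it has to be called while the loop is already running:

import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler


async def tick():
    print("tick")


async def main():
    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, "interval", seconds=3)
    scheduler.start()              # needs a running loop (asyncio.get_running_loop)
    await asyncio.Event().wait()   # keep the loop alive


asyncio.run(main())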
@@ -0,0 +1,42 @@
from threading import Event, Thread

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool


class BackgroundScheduler(BlockingScheduler):
    """
    A scheduler that runs in the background using a separate thread
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).

    Extra options:

    ========== =============================================================================
    ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
               `the documentation
               <https://docs.python.org/3.4/library/threading.html#thread-objects>`_
               for further details)
    ========== =============================================================================
    """

    _thread = None

    def _configure(self, config):
        self._daemon = asbool(config.pop("daemon", True))
        super()._configure(config)

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        BaseScheduler.start(self, *args, **kwargs)
        self._thread = Thread(
            target=self._main_loop, name="APScheduler", daemon=self._daemon
        )
        self._thread.start()

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._thread.join()
        del self._thread
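A minimal usage sketch (not part of the commit), assuming the standard add_job API; start() returns immediately, so the main thread has to stay alive on its own:

import time

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print("tick")


scheduler = BackgroundScheduler(daemon=True)   # "daemon" is the extra option documented above
scheduler.add_job(tick, "interval", seconds=3)
scheduler.start()                              # returns immediately; jobs run in the APScheduler thread

try:
    while True:
        time.sleep(1)                          # keep the main thread alive
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()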
venv/lib/python3.12/site-packages/apscheduler/schedulers/base.py (new file, 1264 lines)
File diff suppressed because it is too large
@@ -0,0 +1,33 @@
from threading import TIMEOUT_MAX, Event

from apscheduler.schedulers.base import STATE_STOPPED, BaseScheduler


class BlockingScheduler(BaseScheduler):
    """
    A scheduler that runs in the foreground
    (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
    """

    _event = None

    def start(self, *args, **kwargs):
        if self._event is None or self._event.is_set():
            self._event = Event()

        super().start(*args, **kwargs)
        self._main_loop()

    def shutdown(self, wait=True):
        super().shutdown(wait)
        self._event.set()

    def _main_loop(self):
        wait_seconds = TIMEOUT_MAX
        while self.state != STATE_STOPPED:
            self._event.wait(wait_seconds)
            self._event.clear()
            wait_seconds = self._process_jobs()

    def wakeup(self):
        self._event.set()
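A minimal usage sketch (not part of the commit); start() enters _main_loop() and blocks until shutdown() is called or the process is interrupted:

from apscheduler.schedulers.blocking import BlockingScheduler


def tick():
    print("tick")


scheduler = BlockingScheduler()
scheduler.add_job(tick, "interval", seconds=3)

try:
    scheduler.start()    # blocks in _main_loop() until shutdown
except (KeyboardInterrupt, SystemExit):
    pass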
@@ -0,0 +1,34 @@
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

try:
    import gevent
    from gevent.event import Event
    from gevent.lock import RLock
except ImportError as exc:  # pragma: nocover
    raise ImportError("GeventScheduler requires gevent installed") from exc


class GeventScheduler(BlockingScheduler):
    """A scheduler that runs as a Gevent greenlet."""

    _greenlet = None

    def start(self, *args, **kwargs):
        self._event = Event()
        BaseScheduler.start(self, *args, **kwargs)
        self._greenlet = gevent.spawn(self._main_loop)
        return self._greenlet

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._greenlet.join()
        del self._greenlet

    def _create_lock(self):
        return RLock()

    def _create_default_executor(self):
        from apscheduler.executors.gevent import GeventExecutor

        return GeventExecutor()
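A minimal usage sketch (not part of the commit); start() returns the spawned greenlet, which the caller can join:

from apscheduler.schedulers.gevent import GeventScheduler


def tick():
    print("tick")


scheduler = GeventScheduler()
scheduler.add_job(tick, "interval", seconds=3)
greenlet = scheduler.start()   # returns the greenlet running _main_loop

try:
    greenlet.join()            # block the main greenlet
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()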
@@ -0,0 +1,44 @@
from importlib import import_module
from itertools import product

from apscheduler.schedulers.base import BaseScheduler

for version, pkgname in product(range(6, 1, -1), ("PySide", "PyQt")):
    try:
        qtcore = import_module(pkgname + str(version) + ".QtCore")
    except ImportError:
        pass
    else:
        QTimer = qtcore.QTimer
        break
else:
    raise ImportError("QtScheduler requires either PySide/PyQt (v6 to v2) installed")


class QtScheduler(BaseScheduler):
    """A scheduler that runs in a Qt event loop."""

    _timer = None

    def shutdown(self, *args, **kwargs):
        super().shutdown(*args, **kwargs)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            wait_time = min(int(wait_seconds * 1000), 2147483647)
            self._timer = QTimer.singleShot(wait_time, self._process_jobs)

    def _stop_timer(self):
        if self._timer:
            if self._timer.isActive():
                self._timer.stop()
            del self._timer

    def wakeup(self):
        self._start_timer(0)

    def _process_jobs(self):
        wait_seconds = super()._process_jobs()
        self._start_timer(wait_seconds)
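A minimal usage sketch (not part of the commit), assuming PyQt6 is the binding picked up by the import loop above; any of the supported PySide/PyQt versions would be used the same way:

import sys

from PyQt6.QtCore import QCoreApplication

from apscheduler.schedulers.qt import QtScheduler


def tick():
    print("tick")


app = QCoreApplication(sys.argv)               # the Qt event loop the scheduler's QTimer needs
scheduler = QtScheduler()
scheduler.add_job(tick, "interval", seconds=3)
scheduler.start()

sys.exit(app.exec())                           # run the Qt event loop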
@@ -0,0 +1,65 @@
from datetime import timedelta
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from tornado.ioloop import IOLoop
except ImportError as exc:  # pragma: nocover
    raise ImportError("TornadoScheduler requires tornado installed") from exc


def run_in_ioloop(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._ioloop.add_callback(func, self, *args, **kwargs)

    return wrapper


class TornadoScheduler(BaseScheduler):
    """
    A scheduler that runs on a Tornado IOLoop.

    The default executor can run jobs based on native coroutines (``async def``).

    =========== ===============================================================
    ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
    =========== ===============================================================
    """

    _ioloop = None
    _timeout = None

    @run_in_ioloop
    def shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()

    def _configure(self, config):
        self._ioloop = maybe_ref(config.pop("io_loop", None)) or IOLoop.current()
        super()._configure(config)

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._timeout = self._ioloop.add_timeout(
                timedelta(seconds=wait_seconds), self.wakeup
            )

    def _stop_timer(self):
        if self._timeout:
            self._ioloop.remove_timeout(self._timeout)
            del self._timeout

    def _create_default_executor(self):
        from apscheduler.executors.tornado import TornadoExecutor

        return TornadoExecutor()

    @run_in_ioloop
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)
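A minimal usage sketch (not part of the commit); start() schedules jobs on the current Tornado IOLoop, which the caller then runs:

from tornado.ioloop import IOLoop

from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print("tick")


scheduler = TornadoScheduler()
scheduler.add_job(tick, "interval", seconds=3)
scheduler.start()

try:
    IOLoop.current().start()   # run the IOLoop the scheduler was configured with
except (KeyboardInterrupt, SystemExit):
    pass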
@@ -0,0 +1,62 @@
from functools import wraps

from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref

try:
    from twisted.internet import reactor as default_reactor
except ImportError as exc:  # pragma: nocover
    raise ImportError("TwistedScheduler requires Twisted installed") from exc


def run_in_reactor(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        self._reactor.callFromThread(func, self, *args, **kwargs)

    return wrapper


class TwistedScheduler(BaseScheduler):
    """
    A scheduler that runs on a Twisted reactor.

    Extra options:

    =========== ========================================================
    ``reactor`` Reactor instance to use (defaults to the global reactor)
    =========== ========================================================
    """

    _reactor = None
    _delayedcall = None

    def _configure(self, config):
        self._reactor = maybe_ref(config.pop("reactor", default_reactor))
        super()._configure(config)

    @run_in_reactor
    def shutdown(self, wait=True):
        super().shutdown(wait)
        self._stop_timer()

    def _start_timer(self, wait_seconds):
        self._stop_timer()
        if wait_seconds is not None:
            self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)

    def _stop_timer(self):
        if self._delayedcall and self._delayedcall.active():
            self._delayedcall.cancel()
            del self._delayedcall

    @run_in_reactor
    def wakeup(self):
        self._stop_timer()
        wait_seconds = self._process_jobs()
        self._start_timer(wait_seconds)

    def _create_default_executor(self):
        from apscheduler.executors.twisted import TwistedExecutor

        return TwistedExecutor()
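A minimal usage sketch (not part of the commit); the scheduler defaults to the global Twisted reactor, which the caller runs as usual:

from twisted.internet import reactor

from apscheduler.schedulers.twisted import TwistedScheduler


def tick():
    print("tick")


scheduler = TwistedScheduler()
scheduler.add_job(tick, "interval", seconds=3)
scheduler.start()

reactor.run()   # run the global reactor that the scheduler uses by default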