First commit of the RSS project

jlimolina 2025-05-24 14:37:58 +02:00
commit 27c9515d29
1568 changed files with 252311 additions and 0 deletions

View file: apscheduler/executors/asyncio.py

@@ -0,0 +1,52 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial


class AsyncIOExecutor(BaseExecutor):
    """
    Runs jobs in the default executor of the event loop.

    If the job function is a native coroutine function, it is scheduled to be run directly in the
    event loop as soon as possible. All other functions are run in the event loop's default
    executor which is usually a thread pool.

    Plugin alias: ``asyncio``
    """

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._eventloop = scheduler._eventloop
        self._pending_futures = set()

    def shutdown(self, wait=True):
        # There is no way to honor wait=True without converting this method into a coroutine method
        for f in self._pending_futures:
            if not f.done():
                f.cancel()

        self._pending_futures.clear()

    def _do_submit_job(self, job, run_times):
        def callback(f):
            self._pending_futures.discard(f)
            try:
                events = f.result()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        if iscoroutinefunction_partial(job.func):
            coro = run_coroutine_job(
                job, job._jobstore_alias, run_times, self._logger.name
            )
            f = self._eventloop.create_task(coro)
        else:
            f = self._eventloop.run_in_executor(
                None, run_job, job, job._jobstore_alias, run_times, self._logger.name
            )

        f.add_done_callback(callback)
        self._pending_futures.add(f)
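
Not part of the commit, but for orientation: a minimal sketch of how this executor is reached in practice, assuming APScheduler 3.x. AsyncIOScheduler installs AsyncIOExecutor as its default executor, so coroutine jobs land in _do_submit_job's create_task branch and plain functions in the run_in_executor branch. The job names here are hypothetical.

import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler


async def poll_feeds():
    # Native coroutine: scheduled directly on the event loop via create_task()
    print("polling feeds")


def archive_entries():
    # Plain function: handed to the loop's default (thread pool) executor
    print("archiving entries")


scheduler = AsyncIOScheduler()
scheduler.add_job(poll_feeds, "interval", seconds=30)
scheduler.add_job(archive_entries, "interval", minutes=5)
scheduler.start()
asyncio.get_event_loop().run_forever()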

View file: apscheduler/executors/base.py

@@ -0,0 +1,205 @@
import logging
import sys
import traceback
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from traceback import format_tb

from apscheduler.events import (
    EVENT_JOB_ERROR,
    EVENT_JOB_EXECUTED,
    EVENT_JOB_MISSED,
    JobExecutionEvent,
)


class MaxInstancesReachedError(Exception):
    def __init__(self, job):
        super().__init__(
            'Job "%s" has already reached its maximum number of instances (%d)'
            % (job.id, job.max_instances)
        )


class BaseExecutor(metaclass=ABCMeta):
    """Abstract base class that defines the interface that every executor must implement."""

    _scheduler = None
    _lock = None
    _logger = logging.getLogger("apscheduler.executors")

    def __init__(self):
        super().__init__()
        self._instances = defaultdict(lambda: 0)

    def start(self, scheduler, alias):
        """
        Called by the scheduler when the scheduler is being started or when the executor is being
        added to an already running scheduler.

        :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
            this executor
        :param str|unicode alias: alias of this executor as it was assigned to the scheduler

        """
        self._scheduler = scheduler
        self._lock = scheduler._create_lock()
        self._logger = logging.getLogger(f"apscheduler.executors.{alias}")

    def shutdown(self, wait=True):
        """
        Shuts down this executor.

        :param bool wait: ``True`` to wait until all submitted jobs
            have been executed
        """

    def submit_job(self, job, run_times):
        """
        Submits job for execution.

        :param Job job: job to execute
        :param list[datetime] run_times: list of datetimes specifying
            when the job should have been run
        :raises MaxInstancesReachedError: if the maximum number of
            allowed instances for this job has been reached

        """
        assert self._lock is not None, "This executor has not been started yet"
        with self._lock:
            if self._instances[job.id] >= job.max_instances:
                raise MaxInstancesReachedError(job)

            self._do_submit_job(job, run_times)
            self._instances[job.id] += 1

    @abstractmethod
    def _do_submit_job(self, job, run_times):
        """Performs the actual task of scheduling `run_job` to be called."""

    def _run_job_success(self, job_id, events):
        """
        Called by the executor with the list of generated events when :func:`run_job` has been
        successfully called.

        """
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        for event in events:
            self._scheduler._dispatch_event(event)

    def _run_job_error(self, job_id, exc, traceback=None):
        """Called by the executor with the exception if there is an error calling `run_job`."""
        with self._lock:
            self._instances[job_id] -= 1
            if self._instances[job_id] == 0:
                del self._instances[job_id]

        exc_info = (exc.__class__, exc, traceback)
        self._logger.error("Error running job %s", job_id, exc_info=exc_info)


def run_job(job, jobstore_alias, run_times, logger_name):
    """
    Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
    scheduler.

    """
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle
        # possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)

            # This is to prevent cyclic references that would lead to memory leaks
            traceback.clear_frames(tb)
            del tb
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events


async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
    """Coroutine version of run_job()."""
    events = []
    logger = logging.getLogger(logger_name)
    for run_time in run_times:
        # See if the job missed its run time window, and handle possible misfires accordingly
        if job.misfire_grace_time is not None:
            difference = datetime.now(timezone.utc) - run_time
            grace_time = timedelta(seconds=job.misfire_grace_time)
            if difference > grace_time:
                events.append(
                    JobExecutionEvent(
                        EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
                    )
                )
                logger.warning('Run time of job "%s" was missed by %s', job, difference)
                continue

        logger.info('Running job "%s" (scheduled at %s)', job, run_time)
        try:
            retval = await job.func(*job.args, **job.kwargs)
        except BaseException:
            exc, tb = sys.exc_info()[1:]
            formatted_tb = "".join(format_tb(tb))
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_ERROR,
                    job.id,
                    jobstore_alias,
                    run_time,
                    exception=exc,
                    traceback=formatted_tb,
                )
            )
            logger.exception('Job "%s" raised an exception', job)
            traceback.clear_frames(tb)
        else:
            events.append(
                JobExecutionEvent(
                    EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
                )
            )
            logger.info('Job "%s" executed successfully', job)

    return events
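
A hedged illustration (not in this file) of the two per-job settings this module enforces, assuming a stock BackgroundScheduler and a hypothetical slow_job: submit_job() raises MaxInstancesReachedError once max_instances concurrent runs exist, and run_job() turns runs that start more than misfire_grace_time seconds late into EVENT_JOB_MISSED events instead of executing them.

import time

from apscheduler.schedulers.background import BackgroundScheduler


def slow_job():
    time.sleep(5)  # deliberately outlives its 2-second interval


scheduler = BackgroundScheduler()
# Allow at most 2 overlapping instances; drop runs that start over 10 s late.
scheduler.add_job(slow_job, "interval", seconds=2,
                  max_instances=2, misfire_grace_time=10)
scheduler.start()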

View file: apscheduler/executors/debug.py

@@ -0,0 +1,20 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_job


class DebugExecutor(BaseExecutor):
    """
    A special executor that executes the target callable directly instead of deferring it to a
    thread or process.

    Plugin alias: ``debug``
    """

    def _do_submit_job(self, job, run_times):
        try:
            events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
        except BaseException:
            self._run_job_error(job.id, *sys.exc_info()[1:])
        else:
            self._run_job_success(job.id, events)
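
As a usage note (not part of the commit): wiring DebugExecutor in as the default executor makes jobs run synchronously in the scheduler's own thread, which is handy for stepping through job code. A minimal sketch, assuming APScheduler 3.x:

from apscheduler.executors.debug import DebugExecutor
from apscheduler.schedulers.blocking import BlockingScheduler

# Jobs execute inline, so breakpoints and tracebacks stay in the calling thread.
scheduler = BlockingScheduler(executors={"default": DebugExecutor()})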

View file: apscheduler/executors/gevent.py

@@ -0,0 +1,29 @@
import sys

from apscheduler.executors.base import BaseExecutor, run_job

try:
    import gevent
except ImportError as exc:  # pragma: nocover
    raise ImportError("GeventExecutor requires gevent installed") from exc


class GeventExecutor(BaseExecutor):
    """
    Runs jobs as greenlets.

    Plugin alias: ``gevent``
    """

    def _do_submit_job(self, job, run_times):
        def callback(greenlet):
            try:
                events = greenlet.get()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        gevent.spawn(
            run_job, job, job._jobstore_alias, run_times, self._logger.name
        ).link(callback)
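
For orientation (not in the file): GeventScheduler registers this executor as its default, and its start() returns the scheduler's main greenlet, which the caller is expected to join. A minimal sketch, assuming gevent is installed:

from apscheduler.schedulers.gevent import GeventScheduler

scheduler = GeventScheduler()  # GeventExecutor becomes the default executor
greenlet = scheduler.start()   # returns the scheduler's main greenlet
greenlet.join()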

View file: apscheduler/executors/pool.py

@@ -0,0 +1,82 @@
import concurrent.futures
import multiprocessing
from abc import abstractmethod
from concurrent.futures.process import BrokenProcessPool

from apscheduler.executors.base import BaseExecutor, run_job


class BasePoolExecutor(BaseExecutor):
    @abstractmethod
    def __init__(self, pool):
        super().__init__()
        self._pool = pool

    def _do_submit_job(self, job, run_times):
        def callback(f):
            exc, tb = (
                f.exception_info()
                if hasattr(f, "exception_info")
                else (f.exception(), getattr(f.exception(), "__traceback__", None))
            )
            if exc:
                self._run_job_error(job.id, exc, tb)
            else:
                self._run_job_success(job.id, f.result())

        f = self._pool.submit(
            run_job, job, job._jobstore_alias, run_times, self._logger.name
        )
        f.add_done_callback(callback)

    def shutdown(self, wait=True):
        self._pool.shutdown(wait)


class ThreadPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures thread pool.

    Plugin alias: ``threadpool``

    :param max_workers: the maximum number of spawned threads.
    :param pool_kwargs: dict of keyword arguments to pass to the underlying
        ThreadPoolExecutor constructor
    """

    def __init__(self, max_workers=10, pool_kwargs=None):
        pool_kwargs = pool_kwargs or {}
        pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
        super().__init__(pool)


class ProcessPoolExecutor(BasePoolExecutor):
    """
    An executor that runs jobs in a concurrent.futures process pool.

    Plugin alias: ``processpool``

    :param max_workers: the maximum number of spawned processes.
    :param pool_kwargs: dict of keyword arguments to pass to the underlying
        ProcessPoolExecutor constructor
    """

    def __init__(self, max_workers=10, pool_kwargs=None):
        self.pool_kwargs = pool_kwargs or {}
        self.pool_kwargs.setdefault("mp_context", multiprocessing.get_context("spawn"))
        pool = concurrent.futures.ProcessPoolExecutor(
            int(max_workers), **self.pool_kwargs
        )
        super().__init__(pool)

    def _do_submit_job(self, job, run_times):
        try:
            super()._do_submit_job(job, run_times)
        except BrokenProcessPool:
            self._logger.warning(
                "Process pool is broken; replacing pool with a fresh instance"
            )
            self._pool = self._pool.__class__(
                self._pool._max_workers, **self.pool_kwargs
            )
            super()._do_submit_job(job, run_times)
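
Not part of the commit: the usual way these pool executors are configured, following the pattern in APScheduler's user guide. Jobs pick an executor by alias, so a CPU-bound job (crunch here is hypothetical) can be routed to the process pool while everything else shares the thread pool.

from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

executors = {
    "default": ThreadPoolExecutor(max_workers=20),
    "processpool": ProcessPoolExecutor(max_workers=4),
}
scheduler = BackgroundScheduler(executors=executors)
# scheduler.add_job(crunch, "cron", hour=3, executor="processpool")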

View file: apscheduler/executors/tornado.py

@@ -0,0 +1,49 @@
import sys
from concurrent.futures import ThreadPoolExecutor

from tornado.gen import convert_yielded

from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial


class TornadoExecutor(BaseExecutor):
    """
    Runs jobs either in a thread pool or directly on the I/O loop.

    If the job function is a native coroutine function, it is scheduled to be run directly in the
    I/O loop as soon as possible. All other functions are run in a thread pool.

    Plugin alias: ``tornado``

    :param int max_workers: maximum number of worker threads in the thread pool
    """

    def __init__(self, max_workers=10):
        super().__init__()
        self.executor = ThreadPoolExecutor(max_workers)

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._ioloop = scheduler._ioloop

    def _do_submit_job(self, job, run_times):
        def callback(f):
            try:
                events = f.result()
            except BaseException:
                self._run_job_error(job.id, *sys.exc_info()[1:])
            else:
                self._run_job_success(job.id, events)

        if iscoroutinefunction_partial(job.func):
            f = run_coroutine_job(
                job, job._jobstore_alias, run_times, self._logger.name
            )
        else:
            f = self.executor.submit(
                run_job, job, job._jobstore_alias, run_times, self._logger.name
            )

        f = convert_yielded(f)
        f.add_done_callback(callback)
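
A usage sketch (not in the file), with the caveat that the scheduler wiring is assumed rather than shown in this commit: TornadoScheduler is expected to install TornadoExecutor as its default executor and to supply the _ioloop used above.

from tornado.ioloop import IOLoop

from apscheduler.schedulers.tornado import TornadoScheduler


def tick():
    print("tick")


scheduler = TornadoScheduler()
scheduler.add_job(tick, "interval", seconds=10)
scheduler.start()
IOLoop.current().start()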

View file: apscheduler/executors/twisted.py

@@ -0,0 +1,24 @@
from apscheduler.executors.base import BaseExecutor, run_job


class TwistedExecutor(BaseExecutor):
    """
    Runs jobs in the reactor's thread pool.

    Plugin alias: ``twisted``
    """

    def start(self, scheduler, alias):
        super().start(scheduler, alias)
        self._reactor = scheduler._reactor

    def _do_submit_job(self, job, run_times):
        def callback(success, result):
            if success:
                self._run_job_success(job.id, result)
            else:
                self._run_job_error(job.id, result.value, result.tb)

        self._reactor.getThreadPool().callInThreadWithCallback(
            callback, run_job, job, job._jobstore_alias, run_times, self._logger.name
        )
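
Finally, a hedged sketch (not part of the commit) of the matching scheduler, which supplies the _reactor attribute used above; this mirrors the standard Twisted example from the APScheduler docs:

from twisted.internet import reactor

from apscheduler.schedulers.twisted import TwistedScheduler


def tick():
    print("tick")


scheduler = TwistedScheduler()
scheduler.add_job(tick, "interval", seconds=10)
scheduler.start()
reactor.run()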