First commit of the RSS project

jlimolina 2025-05-24 14:37:58 +02:00
commit 27c9515d29
1568 changed files with 252311 additions and 0 deletions

venv/bin/Activate.ps1 Normal file
@@ -0,0 +1,247 @@
<#
.Synopsis
Activate a Python virtual environment for the current PowerShell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (ie. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
.Notes
On Windows, it may be required to enable this Activate.ps1 script by setting the
execution policy for the user. You can do this by issuing the following PowerShell
command:
PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
For more information on Execution Policies:
https://go.microsoft.com/fwlink/?LinkID=135170
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including the
addition of the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove VIRTUAL_ENV_PROMPT altogether.
if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
Remove-Item -Path env:VIRTUAL_ENV_PROMPT
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last character is
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0, 1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
}
else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
}
else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
$env:VIRTUAL_ENV_PROMPT = $Prompt
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

venv/bin/activate Normal file
@@ -0,0 +1,70 @@
# This file must be used with "source bin/activate" *from bash*
# You cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
# on Windows, a path can contain colons and backslashes and has to be converted:
if [ "${OSTYPE:-}" = "cygwin" ] || [ "${OSTYPE:-}" = "msys" ] ; then
# transform D:\path\to\venv to /d/path/to/venv on MSYS
# and to /cygdrive/d/path/to/venv on Cygwin
export VIRTUAL_ENV=$(cygpath /home/x/rss/venv)
else
# use the path as-is
export VIRTUAL_ENV=/home/x/rss/venv
fi
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/"bin":$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
PS1='(venv) '"${PS1:-}"
export PS1
VIRTUAL_ENV_PROMPT='(venv) '
export VIRTUAL_ENV_PROMPT
fi
# Call hash to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
hash -r 2> /dev/null

venv/bin/activate.csh Normal file
@@ -0,0 +1,27 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV /home/x/rss/venv
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/"bin":$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
set prompt = '(venv) '"$prompt"
setenv VIRTUAL_ENV_PROMPT '(venv) '
endif
alias pydoc python -m pydoc
rehash

venv/bin/activate.fish Normal file
@@ -0,0 +1,69 @@
# This file must be used with "source <venv>/bin/activate.fish" *from fish*
# (https://fishshell.com/). You cannot run it directly.
function deactivate -d "Exit virtual environment and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
set -e _OLD_FISH_PROMPT_OVERRIDE
# prevents error when using nested fish instances (Issue #93858)
if functions -q _old_fish_prompt
functions -e fish_prompt
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
end
set -e VIRTUAL_ENV
set -e VIRTUAL_ENV_PROMPT
if test "$argv[1]" != "nondestructive"
# Self-destruct!
functions -e deactivate
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV /home/x/rss/venv
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/"bin $PATH
# Unset PYTHONHOME if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# Save the current fish_prompt function as the function _old_fish_prompt.
functions -c fish_prompt _old_fish_prompt
# With the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command.
set -l old_status $status
# Output the venv prompt; color taken from the blue of the Python logo.
printf "%s%s%s" (set_color 4B8BBE) '(venv) ' (set_color normal)
# Restore the return status of the previous command.
echo "exit $old_status" | .
# Output the original/"old" prompt.
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
set -gx VIRTUAL_ENV_PROMPT '(venv) '
end

venv/bin/flask Executable file
@@ -0,0 +1,8 @@
#!/home/x/rss/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

venv/bin/pip Executable file
@@ -0,0 +1,8 @@
#!/home/x/rss/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

venv/bin/pip3 Executable file
@@ -0,0 +1,8 @@
#!/home/x/rss/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

venv/bin/pip3.12 Executable file
@@ -0,0 +1,8 @@
#!/home/x/rss/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())

venv/bin/python Symbolic link
@@ -0,0 +1 @@
python3

venv/bin/python3 Symbolic link
@@ -0,0 +1 @@
/usr/bin/python3

venv/bin/python3.12 Symbolic link
@@ -0,0 +1 @@
python3

@@ -0,0 +1,19 @@
This is the MIT license: http://www.opensource.org/licenses/mit-license.php
Copyright (c) Alex Grönholm
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,147 @@
Metadata-Version: 2.1
Name: APScheduler
Version: 3.11.0
Summary: In-process task scheduler with Cron-like capabilities
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
License: MIT
Project-URL: Documentation, https://apscheduler.readthedocs.io/en/3.x/
Project-URL: Changelog, https://apscheduler.readthedocs.io/en/3.x/versionhistory.html
Project-URL: Source code, https://github.com/agronholm/apscheduler
Project-URL: Issue tracker, https://github.com/agronholm/apscheduler/issues
Keywords: scheduling,cron
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Requires-Python: >=3.8
Description-Content-Type: text/x-rst
License-File: LICENSE.txt
Requires-Dist: tzlocal>=3.0
Requires-Dist: backports.zoneinfo; python_version < "3.9"
Provides-Extra: etcd
Requires-Dist: etcd3; extra == "etcd"
Requires-Dist: protobuf<=3.21.0; extra == "etcd"
Provides-Extra: gevent
Requires-Dist: gevent; extra == "gevent"
Provides-Extra: mongodb
Requires-Dist: pymongo>=3.0; extra == "mongodb"
Provides-Extra: redis
Requires-Dist: redis>=3.0; extra == "redis"
Provides-Extra: rethinkdb
Requires-Dist: rethinkdb>=2.4.0; extra == "rethinkdb"
Provides-Extra: sqlalchemy
Requires-Dist: sqlalchemy>=1.4; extra == "sqlalchemy"
Provides-Extra: tornado
Requires-Dist: tornado>=4.3; extra == "tornado"
Provides-Extra: twisted
Requires-Dist: twisted; extra == "twisted"
Provides-Extra: zookeeper
Requires-Dist: kazoo; extra == "zookeeper"
Provides-Extra: test
Requires-Dist: APScheduler[etcd,mongodb,redis,rethinkdb,sqlalchemy,tornado,zookeeper]; extra == "test"
Requires-Dist: pytest; extra == "test"
Requires-Dist: anyio>=4.5.2; extra == "test"
Requires-Dist: PySide6; (platform_python_implementation == "CPython" and python_version < "3.14") and extra == "test"
Requires-Dist: gevent; python_version < "3.14" and extra == "test"
Requires-Dist: pytz; extra == "test"
Requires-Dist: twisted; python_version < "3.14" and extra == "test"
Provides-Extra: doc
Requires-Dist: packaging; extra == "doc"
Requires-Dist: sphinx; extra == "doc"
Requires-Dist: sphinx-rtd-theme>=1.3.0; extra == "doc"
.. image:: https://github.com/agronholm/apscheduler/workflows/Python%20codeqa/test/badge.svg?branch=3.x
:target: https://github.com/agronholm/apscheduler/actions?query=workflow%3A%22Python+codeqa%2Ftest%22+branch%3A3.x
:alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/apscheduler/badge.svg?branch=3.x
:target: https://coveralls.io/github/agronholm/apscheduler?branch=3.x
:alt: Code Coverage
.. image:: https://readthedocs.org/projects/apscheduler/badge/?version=3.x
:target: https://apscheduler.readthedocs.io/en/master/?badge=3.x
:alt: Documentation
Advanced Python Scheduler (APScheduler) is a Python library that lets you schedule your Python code
to be executed later, either just once or periodically. You can add new jobs or remove old ones on
the fly as you please. If you store your jobs in a database, they will also survive scheduler
restarts and maintain their state. When the scheduler is restarted, it will then run all the jobs
it should have run while it was offline [#f1]_.
Among other things, APScheduler can be used as a cross-platform, application specific replacement
to platform specific schedulers, such as the cron daemon or the Windows task scheduler. Please
note, however, that APScheduler is **not** a daemon or service itself, nor does it come with any
command line tools. It is primarily meant to be run inside existing applications. That said,
APScheduler does provide some building blocks for you to build a scheduler service or to run a
dedicated scheduler process.
APScheduler has three built-in scheduling systems you can use:
* Cron-style scheduling (with optional start/end times)
* Interval-based execution (runs jobs on even intervals, with optional start/end times)
* One-off delayed execution (runs jobs once, on a set date/time)
You can mix and match scheduling systems and the backends where the jobs are stored any way you
like. Supported backends for storing jobs include:
* Memory
* `SQLAlchemy <http://www.sqlalchemy.org/>`_ (any RDBMS supported by SQLAlchemy works)
* `MongoDB <http://www.mongodb.org/>`_
* `Redis <http://redis.io/>`_
* `RethinkDB <https://www.rethinkdb.com/>`_
* `ZooKeeper <https://zookeeper.apache.org/>`_
* `Etcd <https://etcd.io/>`_
APScheduler also integrates with several common Python frameworks, like:
* `asyncio <http://docs.python.org/3.4/library/asyncio.html>`_ (:pep:`3156`)
* `gevent <http://www.gevent.org/>`_
* `Tornado <http://www.tornadoweb.org/>`_
* `Twisted <http://twistedmatrix.com/>`_
* `Qt <http://qt-project.org/>`_ (using either
`PyQt <http://www.riverbankcomputing.com/software/pyqt/intro>`_ ,
`PySide6 <https://wiki.qt.io/Qt_for_Python>`_ ,
`PySide2 <https://wiki.qt.io/Qt_for_Python>`_ or
`PySide <http://qt-project.org/wiki/PySide>`_)
There are third party solutions for integrating APScheduler with other frameworks:
* `Django <https://github.com/jarekwg/django-apscheduler>`_
* `Flask <https://github.com/viniciuschiele/flask-apscheduler>`_
.. [#f1] The cutoff period for this is also configurable.
Documentation
-------------
Documentation can be found `here <https://apscheduler.readthedocs.io/>`_.
Source
------
The source can be browsed at `Github <https://github.com/agronholm/apscheduler/tree/3.x>`_.
Reporting bugs
--------------
A `bug tracker <https://github.com/agronholm/apscheduler/issues>`_ is provided by Github.
Getting help
------------
If you have problems or other questions, you can either:
* Ask in the `apscheduler <https://gitter.im/apscheduler/Lobby>`_ room on Gitter
* Ask on the `APScheduler GitHub discussion forum <https://github.com/agronholm/apscheduler/discussions>`_, or
* Ask on `StackOverflow <http://stackoverflow.com/questions/tagged/apscheduler>`_ and tag your
question with the ``apscheduler`` tag
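
For orientation, here is a minimal sketch of the three scheduling styles the README above lists, using the BackgroundScheduler shipped in this package. It is illustrative only; the `tick` function and the schedule values are assumptions, not part of this commit.

```python
from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    # Illustrative job body.
    print("tick", datetime.now())


scheduler = BackgroundScheduler()
# Interval-based execution: every 10 seconds.
scheduler.add_job(tick, "interval", seconds=10)
# Cron-style scheduling: every day at 07:30.
scheduler.add_job(tick, "cron", hour=7, minute=30)
# One-off delayed execution: once, a minute from now.
scheduler.add_job(tick, "date", run_date=datetime.now() + timedelta(minutes=1))
scheduler.start()
```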

@@ -0,0 +1,86 @@
APScheduler-3.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
APScheduler-3.11.0.dist-info/LICENSE.txt,sha256=YWP3mH37ONa8MgzitwsvArhivEESZRbVUu8c1DJH51g,1130
APScheduler-3.11.0.dist-info/METADATA,sha256=Mve2P3vZbWWDb5V-XfZO80hkih9E6s00Nn5ptU2__9w,6374
APScheduler-3.11.0.dist-info/RECORD,,
APScheduler-3.11.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
APScheduler-3.11.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
APScheduler-3.11.0.dist-info/entry_points.txt,sha256=HSDTxgulLTgymfXK2UNCPP1ib5rlQSFgZJEg72vto3s,1181
APScheduler-3.11.0.dist-info/top_level.txt,sha256=O3oMCWxG-AHkecUoO6Ze7-yYjWrttL95uHO8-RFdYvE,12
apscheduler/__init__.py,sha256=hOpI9oJuk5l5I_VtdsHPous2Qr-ZDX573e7NaYRWFUs,380
apscheduler/__pycache__/__init__.cpython-312.pyc,,
apscheduler/__pycache__/events.cpython-312.pyc,,
apscheduler/__pycache__/job.cpython-312.pyc,,
apscheduler/__pycache__/util.cpython-312.pyc,,
apscheduler/events.py,sha256=W_Wg5aTBXDxXhHtimn93ZEjV3x0ntF-Y0EAVuZPhiXY,3591
apscheduler/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apscheduler/executors/__pycache__/__init__.cpython-312.pyc,,
apscheduler/executors/__pycache__/asyncio.cpython-312.pyc,,
apscheduler/executors/__pycache__/base.cpython-312.pyc,,
apscheduler/executors/__pycache__/debug.cpython-312.pyc,,
apscheduler/executors/__pycache__/gevent.cpython-312.pyc,,
apscheduler/executors/__pycache__/pool.cpython-312.pyc,,
apscheduler/executors/__pycache__/tornado.cpython-312.pyc,,
apscheduler/executors/__pycache__/twisted.cpython-312.pyc,,
apscheduler/executors/asyncio.py,sha256=g0ArcxefoTnEqtyr_IRc-M3dcj0bhuvHcxwRp2s3nDE,1768
apscheduler/executors/base.py,sha256=HErgd8d1g0-BjXnylLcFyoo6GU3wHgW9GJVaFNMV7dI,7116
apscheduler/executors/debug.py,sha256=15_ogSBzl8RRCfBYDnkIV2uMH8cLk1KImYmBa_NVGpc,573
apscheduler/executors/gevent.py,sha256=_ZFpbn7-tH5_lAeL4sxEyPhxyUTtUUSrH8s42EHGQ2w,761
apscheduler/executors/pool.py,sha256=q_shxnvXLjdcwhtKyPvQSYngOjAeKQO8KCvZeb19RSQ,2683
apscheduler/executors/tornado.py,sha256=lb6mshRj7GMLz3d8StwESnlZsAfrNmW78Wokcg__Lk8,1581
apscheduler/executors/twisted.py,sha256=YUEDnaPbP_M0lXCmNAW_yPiLKwbO9vD3KMiBFQ2D4h0,726
apscheduler/job.py,sha256=GzOGMfOM6STwd3HWArVAylO-1Kb0f2qA_PRuXs5LPk4,11153
apscheduler/jobstores/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apscheduler/jobstores/__pycache__/__init__.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/base.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/etcd.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/memory.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/mongodb.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/redis.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/rethinkdb.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/sqlalchemy.cpython-312.pyc,,
apscheduler/jobstores/__pycache__/zookeeper.cpython-312.pyc,,
apscheduler/jobstores/base.py,sha256=ZDOgMtHLaF3TPUOQwmkBIDcpnHU0aUhtzZOGmMGaJn8,4416
apscheduler/jobstores/etcd.py,sha256=O7C40CGlnn3cPinchJEs2sWcqnzEZQt3c6WnhgPRSdQ,5703
apscheduler/jobstores/memory.py,sha256=HmOs7FbrOoQNywz-yfq2v5esGDHeKE_mvMNFDeGZ31E,3595
apscheduler/jobstores/mongodb.py,sha256=mCIwcKiWcicM2qdAQn51QBEkGlNfbk_73Oi6soShNcM,5319
apscheduler/jobstores/redis.py,sha256=El-H2eUfZjPZca7vwy10B9gZv5RzRucbkDu7Ti07vyM,5482
apscheduler/jobstores/rethinkdb.py,sha256=SdT3jPrhxnmBoL4IClDfHsez4DpREnYEsHndIP8idHA,5922
apscheduler/jobstores/sqlalchemy.py,sha256=2jaq3ZcoXEyIqqvYf3eloaP-_ZAqojt0EuWWvQ2LMRg,6799
apscheduler/jobstores/zookeeper.py,sha256=32bEZNJNniPwmYXBITZ3eSRBq6hipqPKDqh4q4NiZvc,6439
apscheduler/schedulers/__init__.py,sha256=POEy7n3BZgccZ44atMvxj0w5PejN55g-55NduZUZFqQ,406
apscheduler/schedulers/__pycache__/__init__.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/asyncio.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/background.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/base.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/blocking.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/gevent.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/qt.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/tornado.cpython-312.pyc,,
apscheduler/schedulers/__pycache__/twisted.cpython-312.pyc,,
apscheduler/schedulers/asyncio.py,sha256=Jo7tgHP1STnMSxNVAWPSkFpmBLngavivTsG9sF0QoWM,1893
apscheduler/schedulers/background.py,sha256=sRNrikUhpyblvA5RCpKC5Djvf3-b6NHvnXTblxlqIaM,1476
apscheduler/schedulers/base.py,sha256=hvnvcI1DOC9bmvrFk8UiLlGxsXKHtMpEHLDEe63mQ_s,48342
apscheduler/schedulers/blocking.py,sha256=138rf9X1C-ZxWVTVAO_pyfYMBKhkqO2qZqJoyGInv5c,872
apscheduler/schedulers/gevent.py,sha256=zS5nHQUkQMrn0zKOaFnUyiG0fXTE01yE9GXVNCdrd90,987
apscheduler/schedulers/qt.py,sha256=6BHOCi8e6L3wXTWwQDjNl8w_GJF_dY6iiO3gEtCJgmI,1241
apscheduler/schedulers/tornado.py,sha256=dQBQKrTtZLPHuhuzZgrT-laU-estPRWGv9W9kgZETnY,1890
apscheduler/schedulers/twisted.py,sha256=sRkI3hosp-OCLVluR_-wZFCz9auxqqWYauZhtOAoRU4,1778
apscheduler/triggers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
apscheduler/triggers/__pycache__/__init__.cpython-312.pyc,,
apscheduler/triggers/__pycache__/base.cpython-312.pyc,,
apscheduler/triggers/__pycache__/calendarinterval.cpython-312.pyc,,
apscheduler/triggers/__pycache__/combining.cpython-312.pyc,,
apscheduler/triggers/__pycache__/date.cpython-312.pyc,,
apscheduler/triggers/__pycache__/interval.cpython-312.pyc,,
apscheduler/triggers/base.py,sha256=8iKllubaexF456IK9jfi56QTrVIfDDPLavUc8wTlnL0,1333
apscheduler/triggers/calendarinterval.py,sha256=BaH5rbTSVbPk3VhFwA3zORLSuZtYmFudS8GF0YxB51E,7411
apscheduler/triggers/combining.py,sha256=LO0YKgBk8V5YfQ-L3qh8Fb6w0BvNOBghTFeAvZx3_P8,4044
apscheduler/triggers/cron/__init__.py,sha256=ByWq4Q96gUWr4AwKoRRA9BD5ZVBvwQ6BtQMhafdStjw,9753
apscheduler/triggers/cron/__pycache__/__init__.cpython-312.pyc,,
apscheduler/triggers/cron/__pycache__/expressions.cpython-312.pyc,,
apscheduler/triggers/cron/__pycache__/fields.cpython-312.pyc,,
apscheduler/triggers/cron/expressions.py,sha256=89n_HxA0826xBJb8RprVzUDECs0dnZ_rX2wVkVsq6l8,9056
apscheduler/triggers/cron/fields.py,sha256=RVbf6Lcyvg-3CqNzEZsfxzQ_weONCIiq5LGDzA3JUAw,3618
apscheduler/triggers/date.py,sha256=ZS_TMjUCSldqlZsUUjlwvuWeMKeDXqqAMcZVFGYpam4,1698
apscheduler/triggers/interval.py,sha256=u6XLrxlaWA41zvIByQvRLHTAuvkibG2fAZAxrWK3118,4679
apscheduler/util.py,sha256=Lz2ddoeIpufXzW-HWnW5J08ijkXWGElDLVJf0DiPa84,13564

@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: setuptools (75.6.0)
Root-Is-Purelib: true
Tag: py3-none-any

@@ -0,0 +1,25 @@
[apscheduler.executors]
asyncio = apscheduler.executors.asyncio:AsyncIOExecutor
debug = apscheduler.executors.debug:DebugExecutor
gevent = apscheduler.executors.gevent:GeventExecutor
processpool = apscheduler.executors.pool:ProcessPoolExecutor
threadpool = apscheduler.executors.pool:ThreadPoolExecutor
tornado = apscheduler.executors.tornado:TornadoExecutor
twisted = apscheduler.executors.twisted:TwistedExecutor
[apscheduler.jobstores]
etcd = apscheduler.jobstores.etcd:EtcdJobStore
memory = apscheduler.jobstores.memory:MemoryJobStore
mongodb = apscheduler.jobstores.mongodb:MongoDBJobStore
redis = apscheduler.jobstores.redis:RedisJobStore
rethinkdb = apscheduler.jobstores.rethinkdb:RethinkDBJobStore
sqlalchemy = apscheduler.jobstores.sqlalchemy:SQLAlchemyJobStore
zookeeper = apscheduler.jobstores.zookeeper:ZooKeeperJobStore
[apscheduler.triggers]
and = apscheduler.triggers.combining:AndTrigger
calendarinterval = apscheduler.triggers.calendarinterval:CalendarIntervalTrigger
cron = apscheduler.triggers.cron:CronTrigger
date = apscheduler.triggers.date:DateTrigger
interval = apscheduler.triggers.interval:IntervalTrigger
or = apscheduler.triggers.combining:OrTrigger

@@ -0,0 +1 @@
apscheduler

@@ -0,0 +1,28 @@
Copyright 2010 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,92 @@
Metadata-Version: 2.1
Name: MarkupSafe
Version: 3.0.2
Summary: Safely add untrusted strings to HTML/XML markup.
Maintainer-email: Pallets <contact@palletsprojects.com>
License: Copyright 2010 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Project-URL: Donate, https://palletsprojects.com/donate
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
Project-URL: Source, https://github.com/pallets/markupsafe/
Project-URL: Chat, https://discord.gg/pallets
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Text Processing :: Markup :: HTML
Classifier: Typing :: Typed
Requires-Python: >=3.9
Description-Content-Type: text/markdown
License-File: LICENSE.txt
# MarkupSafe
MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.
## Examples
```pycon
>>> from markupsafe import Markup, escape
>>> # escape replaces special characters and wraps in Markup
>>> escape("<script>alert(document.cookie);</script>")
Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> # wrap in Markup to mark text "safe" and prevent escaping
>>> Markup("<strong>Hello</strong>")
Markup('<strong>hello</strong>')
>>> escape(Markup("<strong>Hello</strong>"))
Markup('<strong>hello</strong>')
>>> # Markup is a str subclass
>>> # methods and operators escape their arguments
>>> template = Markup("Hello <em>{name}</em>")
>>> template.format(name='"World"')
Markup('Hello <em>&#34;World&#34;</em>')
```
## Donate
The Pallets organization develops and supports MarkupSafe and other
popular packages. In order to grow the community of contributors and
users, and allow the maintainers to devote more time to the projects,
[please donate today][].
[please donate today]: https://palletsprojects.com/donate

@@ -0,0 +1,14 @@
MarkupSafe-3.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
MarkupSafe-3.0.2.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
MarkupSafe-3.0.2.dist-info/METADATA,sha256=aAwbZhSmXdfFuMM-rEHpeiHRkBOGESyVLJIuwzHP-nw,3975
MarkupSafe-3.0.2.dist-info/RECORD,,
MarkupSafe-3.0.2.dist-info/WHEEL,sha256=OVgtqZzfzIXXtylXP90gxCZ6CKBCwKYyHM8PpMEjN1M,151
MarkupSafe-3.0.2.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
markupsafe/__init__.py,sha256=sr-U6_27DfaSrj5jnHYxWN-pvhM27sjlDplMDPZKm7k,13214
markupsafe/__pycache__/__init__.cpython-312.pyc,,
markupsafe/__pycache__/_native.cpython-312.pyc,,
markupsafe/_native.py,sha256=hSLs8Jmz5aqayuengJJ3kdT5PwNpBWpKrmQSdipndC8,210
markupsafe/_speedups.c,sha256=O7XulmTo-epI6n2FtMVOrJXl8EAaIwD2iNYmBI5SEoQ,4149
markupsafe/_speedups.cpython-312-x86_64-linux-gnu.so,sha256=t1DBZlpsjFA30BOOvXfXfT1wvO_4cS16VbHz1-49q5U,43432
markupsafe/_speedups.pyi,sha256=ENd1bYe7gbBUf2ywyYWOGUpnXOHNJ-cgTNqetlW8h5k,41
markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: setuptools (75.2.0)
Root-Is-Purelib: false
Tag: cp312-cp312-manylinux_2_17_x86_64
Tag: cp312-cp312-manylinux2014_x86_64

@@ -0,0 +1 @@
markupsafe

@@ -0,0 +1,11 @@
import importlib.metadata as importlib_metadata
import sys
try:
release = importlib_metadata.version("APScheduler").split("-")[0]
except importlib_metadata.PackageNotFoundError:
release = "3.5.0"
version_info = tuple(int(x) if x.isdigit() else x for x in release.split("."))
version = __version__ = ".".join(str(x) for x in version_info[:3])
del sys, importlib_metadata

@@ -0,0 +1,134 @@
__all__ = (
"EVENT_SCHEDULER_STARTED",
"EVENT_SCHEDULER_SHUTDOWN",
"EVENT_SCHEDULER_PAUSED",
"EVENT_SCHEDULER_RESUMED",
"EVENT_EXECUTOR_ADDED",
"EVENT_EXECUTOR_REMOVED",
"EVENT_JOBSTORE_ADDED",
"EVENT_JOBSTORE_REMOVED",
"EVENT_ALL_JOBS_REMOVED",
"EVENT_JOB_ADDED",
"EVENT_JOB_REMOVED",
"EVENT_JOB_MODIFIED",
"EVENT_JOB_EXECUTED",
"EVENT_JOB_ERROR",
"EVENT_JOB_MISSED",
"EVENT_JOB_SUBMITTED",
"EVENT_JOB_MAX_INSTANCES",
"EVENT_ALL",
"SchedulerEvent",
"JobEvent",
"JobExecutionEvent",
"JobSubmissionEvent",
)
EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2**0
EVENT_SCHEDULER_SHUTDOWN = 2**1
EVENT_SCHEDULER_PAUSED = 2**2
EVENT_SCHEDULER_RESUMED = 2**3
EVENT_EXECUTOR_ADDED = 2**4
EVENT_EXECUTOR_REMOVED = 2**5
EVENT_JOBSTORE_ADDED = 2**6
EVENT_JOBSTORE_REMOVED = 2**7
EVENT_ALL_JOBS_REMOVED = 2**8
EVENT_JOB_ADDED = 2**9
EVENT_JOB_REMOVED = 2**10
EVENT_JOB_MODIFIED = 2**11
EVENT_JOB_EXECUTED = 2**12
EVENT_JOB_ERROR = 2**13
EVENT_JOB_MISSED = 2**14
EVENT_JOB_SUBMITTED = 2**15
EVENT_JOB_MAX_INSTANCES = 2**16
EVENT_ALL = (
EVENT_SCHEDULER_STARTED
| EVENT_SCHEDULER_SHUTDOWN
| EVENT_SCHEDULER_PAUSED
| EVENT_SCHEDULER_RESUMED
| EVENT_EXECUTOR_ADDED
| EVENT_EXECUTOR_REMOVED
| EVENT_JOBSTORE_ADDED
| EVENT_JOBSTORE_REMOVED
| EVENT_ALL_JOBS_REMOVED
| EVENT_JOB_ADDED
| EVENT_JOB_REMOVED
| EVENT_JOB_MODIFIED
| EVENT_JOB_EXECUTED
| EVENT_JOB_ERROR
| EVENT_JOB_MISSED
| EVENT_JOB_SUBMITTED
| EVENT_JOB_MAX_INSTANCES
)
class SchedulerEvent:
"""
An event that concerns the scheduler itself.
:ivar code: the type code of this event
:ivar alias: alias of the job store or executor that was added or removed (if applicable)
"""
def __init__(self, code, alias=None):
super().__init__()
self.code = code
self.alias = alias
def __repr__(self):
return "<%s (code=%d)>" % (self.__class__.__name__, self.code)
class JobEvent(SchedulerEvent):
"""
An event that concerns a job.
:ivar code: the type code of this event
:ivar job_id: identifier of the job in question
:ivar jobstore: alias of the job store containing the job in question
"""
def __init__(self, code, job_id, jobstore):
super().__init__(code)
self.code = code
self.job_id = job_id
self.jobstore = jobstore
class JobSubmissionEvent(JobEvent):
"""
An event that concerns the submission of a job to its executor.
:ivar scheduled_run_times: a list of datetimes when the job was intended to run
"""
def __init__(self, code, job_id, jobstore, scheduled_run_times):
super().__init__(code, job_id, jobstore)
self.scheduled_run_times = scheduled_run_times
class JobExecutionEvent(JobEvent):
"""
An event that concerns the running of a job within its executor.
:ivar scheduled_run_time: the time when the job was scheduled to be run
:ivar retval: the return value of the successfully executed job
:ivar exception: the exception raised by the job
:ivar traceback: a formatted traceback for the exception
"""
def __init__(
self,
code,
job_id,
jobstore,
scheduled_run_time,
retval=None,
exception=None,
traceback=None,
):
super().__init__(code, job_id, jobstore)
self.scheduled_run_time = scheduled_run_time
self.retval = retval
self.exception = exception
self.traceback = traceback
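
The EVENT_* constants above are bit flags meant to be OR-ed together when registering a listener. A short sketch of typical usage (the listener and scheduler names are illustrative, not part of this commit):

```python
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.background import BackgroundScheduler


def on_job_done(event):
    # event is a JobExecutionEvent: it carries either retval or exception.
    if event.exception:
        print(f"job {event.job_id} failed: {event.exception}")
    else:
        print(f"job {event.job_id} returned {event.retval!r}")


scheduler = BackgroundScheduler()
scheduler.add_listener(on_job_done, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.start()
```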

@@ -0,0 +1,52 @@
import sys
from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial
class AsyncIOExecutor(BaseExecutor):
"""
Runs jobs in the default executor of the event loop.
If the job function is a native coroutine function, it is scheduled to be run directly in the
event loop as soon as possible. All other functions are run in the event loop's default
executor which is usually a thread pool.
Plugin alias: ``asyncio``
"""
def start(self, scheduler, alias):
super().start(scheduler, alias)
self._eventloop = scheduler._eventloop
self._pending_futures = set()
def shutdown(self, wait=True):
# There is no way to honor wait=True without converting this method into a coroutine method
for f in self._pending_futures:
if not f.done():
f.cancel()
self._pending_futures.clear()
def _do_submit_job(self, job, run_times):
def callback(f):
self._pending_futures.discard(f)
try:
events = f.result()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
if iscoroutinefunction_partial(job.func):
coro = run_coroutine_job(
job, job._jobstore_alias, run_times, self._logger.name
)
f = self._eventloop.create_task(coro)
else:
f = self._eventloop.run_in_executor(
None, run_job, job, job._jobstore_alias, run_times, self._logger.name
)
f.add_done_callback(callback)
self._pending_futures.add(f)
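
As the docstring above notes, coroutine jobs are scheduled straight onto the event loop while plain callables fall back to the loop's default executor. A minimal sketch with the matching AsyncIOScheduler (the `poll_feeds` coroutine and interval are illustrative assumptions):

```python
import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler


async def poll_feeds():
    # Illustrative coroutine job; runs directly on the event loop.
    print("polling feeds...")


async def main():
    scheduler = AsyncIOScheduler()
    scheduler.add_job(poll_feeds, "interval", seconds=30)
    scheduler.start()
    await asyncio.Event().wait()  # keep the loop alive for the scheduler


asyncio.run(main())
```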

@@ -0,0 +1,205 @@
import logging
import sys
import traceback
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from traceback import format_tb
from apscheduler.events import (
EVENT_JOB_ERROR,
EVENT_JOB_EXECUTED,
EVENT_JOB_MISSED,
JobExecutionEvent,
)
class MaxInstancesReachedError(Exception):
def __init__(self, job):
super().__init__(
'Job "%s" has already reached its maximum number of instances (%d)'
% (job.id, job.max_instances)
)
class BaseExecutor(metaclass=ABCMeta):
"""Abstract base class that defines the interface that every executor must implement."""
_scheduler = None
_lock = None
_logger = logging.getLogger("apscheduler.executors")
def __init__(self):
super().__init__()
self._instances = defaultdict(lambda: 0)
def start(self, scheduler, alias):
"""
Called by the scheduler when the scheduler is being started or when the executor is being
added to an already running scheduler.
:param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
this executor
:param str|unicode alias: alias of this executor as it was assigned to the scheduler
"""
self._scheduler = scheduler
self._lock = scheduler._create_lock()
self._logger = logging.getLogger(f"apscheduler.executors.{alias}")
def shutdown(self, wait=True):
"""
Shuts down this executor.
:param bool wait: ``True`` to wait until all submitted jobs
have been executed
"""
def submit_job(self, job, run_times):
"""
Submits job for execution.
:param Job job: job to execute
:param list[datetime] run_times: list of datetimes specifying
when the job should have been run
:raises MaxInstancesReachedError: if the maximum number of
allowed instances for this job has been reached
"""
assert self._lock is not None, "This executor has not been started yet"
with self._lock:
if self._instances[job.id] >= job.max_instances:
raise MaxInstancesReachedError(job)
self._do_submit_job(job, run_times)
self._instances[job.id] += 1
@abstractmethod
def _do_submit_job(self, job, run_times):
"""Performs the actual task of scheduling `run_job` to be called."""
def _run_job_success(self, job_id, events):
"""
Called by the executor with the list of generated events when :func:`run_job` has been
successfully called.
"""
with self._lock:
self._instances[job_id] -= 1
if self._instances[job_id] == 0:
del self._instances[job_id]
for event in events:
self._scheduler._dispatch_event(event)
def _run_job_error(self, job_id, exc, traceback=None):
"""Called by the executor with the exception if there is an error calling `run_job`."""
with self._lock:
self._instances[job_id] -= 1
if self._instances[job_id] == 0:
del self._instances[job_id]
exc_info = (exc.__class__, exc, traceback)
self._logger.error("Error running job %s", job_id, exc_info=exc_info)
def run_job(job, jobstore_alias, run_times, logger_name):
"""
Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
scheduler.
"""
events = []
logger = logging.getLogger(logger_name)
for run_time in run_times:
# See if the job missed its run time window, and handle
# possible misfires accordingly
if job.misfire_grace_time is not None:
difference = datetime.now(timezone.utc) - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
events.append(
JobExecutionEvent(
EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
)
)
logger.warning('Run time of job "%s" was missed by %s', job, difference)
continue
logger.info('Running job "%s" (scheduled at %s)', job, run_time)
try:
retval = job.func(*job.args, **job.kwargs)
except BaseException:
exc, tb = sys.exc_info()[1:]
formatted_tb = "".join(format_tb(tb))
events.append(
JobExecutionEvent(
EVENT_JOB_ERROR,
job.id,
jobstore_alias,
run_time,
exception=exc,
traceback=formatted_tb,
)
)
logger.exception('Job "%s" raised an exception', job)
# This is to prevent cyclic references that would lead to memory leaks
traceback.clear_frames(tb)
del tb
else:
events.append(
JobExecutionEvent(
EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
)
)
logger.info('Job "%s" executed successfully', job)
return events
async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
"""Coroutine version of run_job()."""
events = []
logger = logging.getLogger(logger_name)
for run_time in run_times:
# See if the job missed its run time window, and handle possible misfires accordingly
if job.misfire_grace_time is not None:
difference = datetime.now(timezone.utc) - run_time
grace_time = timedelta(seconds=job.misfire_grace_time)
if difference > grace_time:
events.append(
JobExecutionEvent(
EVENT_JOB_MISSED, job.id, jobstore_alias, run_time
)
)
logger.warning('Run time of job "%s" was missed by %s', job, difference)
continue
logger.info('Running job "%s" (scheduled at %s)', job, run_time)
try:
retval = await job.func(*job.args, **job.kwargs)
except BaseException:
exc, tb = sys.exc_info()[1:]
formatted_tb = "".join(format_tb(tb))
events.append(
JobExecutionEvent(
EVENT_JOB_ERROR,
job.id,
jobstore_alias,
run_time,
exception=exc,
traceback=formatted_tb,
)
)
logger.exception('Job "%s" raised an exception', job)
traceback.clear_frames(tb)
else:
events.append(
JobExecutionEvent(
EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time, retval=retval
)
)
logger.info('Job "%s" executed successfully', job)
return events

@@ -0,0 +1,20 @@
import sys
from apscheduler.executors.base import BaseExecutor, run_job
class DebugExecutor(BaseExecutor):
"""
A special executor that executes the target callable directly instead of deferring it to a
thread or process.
Plugin alias: ``debug``
"""
def _do_submit_job(self, job, run_times):
try:
events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)

@@ -0,0 +1,29 @@
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError as exc: # pragma: nocover
raise ImportError("GeventExecutor requires gevent installed") from exc
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(
run_job, job, job._jobstore_alias, run_times, self._logger.name
).link(callback)

@@ -0,0 +1,82 @@
import concurrent.futures
import multiprocessing
from abc import abstractmethod
from concurrent.futures.process import BrokenProcessPool
from apscheduler.executors.base import BaseExecutor, run_job
class BasePoolExecutor(BaseExecutor):
@abstractmethod
def __init__(self, pool):
super().__init__()
self._pool = pool
def _do_submit_job(self, job, run_times):
def callback(f):
exc, tb = (
f.exception_info()
if hasattr(f, "exception_info")
else (f.exception(), getattr(f.exception(), "__traceback__", None))
)
if exc:
self._run_job_error(job.id, exc, tb)
else:
self._run_job_success(job.id, f.result())
f = self._pool.submit(
run_job, job, job._jobstore_alias, run_times, self._logger.name
)
f.add_done_callback(callback)
def shutdown(self, wait=True):
self._pool.shutdown(wait)
class ThreadPoolExecutor(BasePoolExecutor):
"""
An executor that runs jobs in a concurrent.futures thread pool.
Plugin alias: ``threadpool``
:param max_workers: the maximum number of spawned threads.
:param pool_kwargs: dict of keyword arguments to pass to the underlying
ThreadPoolExecutor constructor
"""
def __init__(self, max_workers=10, pool_kwargs=None):
pool_kwargs = pool_kwargs or {}
pool = concurrent.futures.ThreadPoolExecutor(int(max_workers), **pool_kwargs)
super().__init__(pool)
class ProcessPoolExecutor(BasePoolExecutor):
"""
An executor that runs jobs in a concurrent.futures process pool.
Plugin alias: ``processpool``
:param max_workers: the maximum number of spawned processes.
:param pool_kwargs: dict of keyword arguments to pass to the underlying
ProcessPoolExecutor constructor
"""
def __init__(self, max_workers=10, pool_kwargs=None):
self.pool_kwargs = pool_kwargs or {}
self.pool_kwargs.setdefault("mp_context", multiprocessing.get_context("spawn"))
pool = concurrent.futures.ProcessPoolExecutor(
int(max_workers), **self.pool_kwargs
)
super().__init__(pool)
def _do_submit_job(self, job, run_times):
try:
super()._do_submit_job(job, run_times)
except BrokenProcessPool:
self._logger.warning(
"Process pool is broken; replacing pool with a fresh instance"
)
self._pool = self._pool.__class__(
self._pool._max_workers, **self.pool_kwargs
)
super()._do_submit_job(job, run_times)
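
A small sketch of wiring these pool executors into a scheduler and routing a CPU-heavy job to the process pool (pool sizes, names, and job bodies are illustrative assumptions, not part of this commit):

```python
from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler


def light_job():
    print("quick I/O-bound work")


def cpu_heavy_job():
    # Must be a module-level function so the process pool can pickle it.
    sum(i * i for i in range(10_000_000))


executors = {
    "default": ThreadPoolExecutor(max_workers=20),
    "processpool": ProcessPoolExecutor(max_workers=4),
}
scheduler = BackgroundScheduler(executors=executors)

scheduler.add_job(light_job, "interval", minutes=1)  # runs in the thread pool
scheduler.add_job(cpu_heavy_job, "interval", minutes=5, executor="processpool")
scheduler.start()
```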

@@ -0,0 +1,49 @@
import sys
from concurrent.futures import ThreadPoolExecutor
from tornado.gen import convert_yielded
from apscheduler.executors.base import BaseExecutor, run_coroutine_job, run_job
from apscheduler.util import iscoroutinefunction_partial
class TornadoExecutor(BaseExecutor):
"""
Runs jobs either in a thread pool or directly on the I/O loop.
If the job function is a native coroutine function, it is scheduled to be run directly in the
I/O loop as soon as possible. All other functions are run in a thread pool.
Plugin alias: ``tornado``
:param int max_workers: maximum number of worker threads in the thread pool
"""
def __init__(self, max_workers=10):
super().__init__()
self.executor = ThreadPoolExecutor(max_workers)
def start(self, scheduler, alias):
super().start(scheduler, alias)
self._ioloop = scheduler._ioloop
def _do_submit_job(self, job, run_times):
def callback(f):
try:
events = f.result()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
if iscoroutinefunction_partial(job.func):
f = run_coroutine_job(
job, job._jobstore_alias, run_times, self._logger.name
)
else:
f = self.executor.submit(
run_job, job, job._jobstore_alias, run_times, self._logger.name
)
f = convert_yielded(f)
f.add_done_callback(callback)

@@ -0,0 +1,24 @@
from apscheduler.executors.base import BaseExecutor, run_job
class TwistedExecutor(BaseExecutor):
"""
Runs jobs in the reactor's thread pool.
Plugin alias: ``twisted``
"""
def start(self, scheduler, alias):
super().start(scheduler, alias)
self._reactor = scheduler._reactor
def _do_submit_job(self, job, run_times):
def callback(success, result):
if success:
self._run_job_success(job.id, result)
else:
self._run_job_error(job.id, result.value, result.tb)
self._reactor.getThreadPool().callInThreadWithCallback(
callback, run_job, job, job._jobstore_alias, run_times, self._logger.name
)

@@ -0,0 +1,330 @@
from collections.abc import Iterable, Mapping
from inspect import isclass, ismethod
from uuid import uuid4
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
check_callable_args,
convert_to_datetime,
datetime_repr,
get_callable_name,
obj_to_ref,
ref_to_obj,
)
class Job:
"""
Contains the options given when scheduling callables and its current schedule and other state.
This class should never be instantiated by the user.
:var str id: the unique identifier of this job
:var str name: the description of this job
:var func: the callable to execute
:var tuple|list args: positional arguments to the callable
:var dict kwargs: keyword arguments to the callable
:var bool coalesce: whether to only run the job once when several run times are due
:var trigger: the trigger object that controls the schedule of this job
:var str executor: the name of the executor that will run this job
:var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to
be late (``None`` means "allow the job to run no matter how late it is")
:var int max_instances: the maximum number of concurrently executing instances allowed for this
job
:var datetime.datetime next_run_time: the next scheduled run time of this job
.. note::
The ``misfire_grace_time`` has some non-obvious effects on job execution. See the
:ref:`missed-job-executions` section in the documentation for an in-depth explanation.
"""
__slots__ = (
"_scheduler",
"_jobstore_alias",
"id",
"trigger",
"executor",
"func",
"func_ref",
"args",
"kwargs",
"name",
"misfire_grace_time",
"coalesce",
"max_instances",
"next_run_time",
"__weakref__",
)
def __init__(self, scheduler, id=None, **kwargs):
super().__init__()
self._scheduler = scheduler
self._jobstore_alias = None
self._modify(id=id or uuid4().hex, **kwargs)
def modify(self, **changes):
"""
Makes the given changes to this job and saves it in the associated job store.
Accepted keyword arguments are the same as the variables on this class.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
:return Job: this job instance
"""
self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)
return self
def reschedule(self, trigger, **trigger_args):
"""
Shortcut for switching the trigger on this job.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
:return Job: this job instance
"""
self._scheduler.reschedule_job(
self.id, self._jobstore_alias, trigger, **trigger_args
)
return self
def pause(self):
"""
Temporarily suspend the execution of this job.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
:return Job: this job instance
"""
self._scheduler.pause_job(self.id, self._jobstore_alias)
return self
def resume(self):
"""
Resume the schedule of this job if previously paused.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
:return Job: this job instance
"""
self._scheduler.resume_job(self.id, self._jobstore_alias)
return self
def remove(self):
"""
Unschedules this job and removes it from its associated job store.
.. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
"""
self._scheduler.remove_job(self.id, self._jobstore_alias)
@property
def pending(self):
"""
Returns ``True`` if the referenced job is still waiting to be added to its designated job
store.
"""
return self._jobstore_alias is None
#
# Private API
#
def _get_run_times(self, now):
"""
Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).
:type now: datetime.datetime
:rtype: list[datetime.datetime]
"""
run_times = []
next_run_time = self.next_run_time
while next_run_time and next_run_time <= now:
run_times.append(next_run_time)
next_run_time = self.trigger.get_next_fire_time(next_run_time, now)
return run_times
def _modify(self, **changes):
"""
Validates the changes to the Job and makes the modifications if and only if all of them
validate.
"""
approved = {}
if "id" in changes:
value = changes.pop("id")
if not isinstance(value, str):
raise TypeError("id must be a nonempty string")
if hasattr(self, "id"):
raise ValueError("The job ID may not be changed")
approved["id"] = value
if "func" in changes or "args" in changes or "kwargs" in changes:
func = changes.pop("func") if "func" in changes else self.func
args = changes.pop("args") if "args" in changes else self.args
kwargs = changes.pop("kwargs") if "kwargs" in changes else self.kwargs
if isinstance(func, str):
func_ref = func
func = ref_to_obj(func)
elif callable(func):
try:
func_ref = obj_to_ref(func)
except ValueError:
# If this happens, this Job won't be serializable
func_ref = None
else:
raise TypeError("func must be a callable or a textual reference to one")
if not hasattr(self, "name") and changes.get("name", None) is None:
changes["name"] = get_callable_name(func)
if isinstance(args, str) or not isinstance(args, Iterable):
raise TypeError("args must be a non-string iterable")
if isinstance(kwargs, str) or not isinstance(kwargs, Mapping):
raise TypeError("kwargs must be a dict-like object")
check_callable_args(func, args, kwargs)
approved["func"] = func
approved["func_ref"] = func_ref
approved["args"] = args
approved["kwargs"] = kwargs
if "name" in changes:
value = changes.pop("name")
if not value or not isinstance(value, str):
raise TypeError("name must be a nonempty string")
approved["name"] = value
if "misfire_grace_time" in changes:
value = changes.pop("misfire_grace_time")
if value is not None and (not isinstance(value, int) or value <= 0):
raise TypeError(
"misfire_grace_time must be either None or a positive integer"
)
approved["misfire_grace_time"] = value
if "coalesce" in changes:
value = bool(changes.pop("coalesce"))
approved["coalesce"] = value
if "max_instances" in changes:
value = changes.pop("max_instances")
if not isinstance(value, int) or value <= 0:
raise TypeError("max_instances must be a positive integer")
approved["max_instances"] = value
if "trigger" in changes:
trigger = changes.pop("trigger")
if not isinstance(trigger, BaseTrigger):
raise TypeError(
f"Expected a trigger instance, got {trigger.__class__.__name__} instead"
)
approved["trigger"] = trigger
if "executor" in changes:
value = changes.pop("executor")
if not isinstance(value, str):
raise TypeError("executor must be a string")
approved["executor"] = value
if "next_run_time" in changes:
value = changes.pop("next_run_time")
approved["next_run_time"] = convert_to_datetime(
value, self._scheduler.timezone, "next_run_time"
)
if changes:
raise AttributeError(
"The following are not modifiable attributes of Job: {}".format(
", ".join(changes)
)
)
for key, value in approved.items():
setattr(self, key, value)
def __getstate__(self):
# Don't allow this Job to be serialized if the function reference could not be determined
if not self.func_ref:
raise ValueError(
f"This Job cannot be serialized since the reference to its callable ({self.func!r}) could not "
"be determined. Consider giving a textual reference (module:function name) "
"instead."
)
# Instance methods cannot survive serialization as-is, so store the "self" argument
# explicitly
func = self.func
if (
ismethod(func)
and not isclass(func.__self__)
and obj_to_ref(func) == self.func_ref
):
args = (func.__self__,) + tuple(self.args)
else:
args = self.args
return {
"version": 1,
"id": self.id,
"func": self.func_ref,
"trigger": self.trigger,
"executor": self.executor,
"args": args,
"kwargs": self.kwargs,
"name": self.name,
"misfire_grace_time": self.misfire_grace_time,
"coalesce": self.coalesce,
"max_instances": self.max_instances,
"next_run_time": self.next_run_time,
}
def __setstate__(self, state):
if state.get("version", 1) > 1:
raise ValueError(
f"Job has version {state['version']}, but only version 1 can be handled"
)
self.id = state["id"]
self.func_ref = state["func"]
self.func = ref_to_obj(self.func_ref)
self.trigger = state["trigger"]
self.executor = state["executor"]
self.args = state["args"]
self.kwargs = state["kwargs"]
self.name = state["name"]
self.misfire_grace_time = state["misfire_grace_time"]
self.coalesce = state["coalesce"]
self.max_instances = state["max_instances"]
self.next_run_time = state["next_run_time"]
def __eq__(self, other):
if isinstance(other, Job):
return self.id == other.id
return NotImplemented
def __repr__(self):
return f"<Job (id={self.id} name={self.name})>"
def __str__(self):
if hasattr(self, "next_run_time"):
status = (
"next run at: " + datetime_repr(self.next_run_time)
if self.next_run_time
else "paused"
)
else:
status = "pending"
return f"{self.name} (trigger: {self.trigger}, {status})"

View file

@ -0,0 +1,141 @@
import logging
from abc import ABCMeta, abstractmethod
class JobLookupError(KeyError):
"""Raised when the job store cannot find a job for update or removal."""
def __init__(self, job_id):
super().__init__(f"No job by the id of {job_id} was found")
class ConflictingIdError(KeyError):
"""Raised when the uniqueness of job IDs is being violated."""
def __init__(self, job_id):
super().__init__(f"Job identifier ({job_id}) conflicts with an existing job")
class TransientJobError(ValueError):
"""
Raised when an attempt to add a transient job (one with no func_ref) to a persistent job
store is detected.
"""
def __init__(self, job_id):
super().__init__(
f"Job ({job_id}) cannot be added to this job store because a reference to the callable "
"could not be determined."
)
class BaseJobStore(metaclass=ABCMeta):
"""Abstract base class that defines the interface that every job store must implement."""
_scheduler = None
_alias = None
_logger = logging.getLogger("apscheduler.jobstores")
def start(self, scheduler, alias):
"""
Called by the scheduler when the scheduler is being started or when the job store is being
added to an already running scheduler.
:param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
this job store
:param str|unicode alias: alias of this job store as it was assigned to the scheduler
"""
self._scheduler = scheduler
self._alias = alias
self._logger = logging.getLogger(f"apscheduler.jobstores.{alias}")
def shutdown(self):
"""Frees any resources still bound to this job store."""
def _fix_paused_jobs_sorting(self, jobs):
for i, job in enumerate(jobs):
if job.next_run_time is not None:
if i > 0:
paused_jobs = jobs[:i]
del jobs[:i]
jobs.extend(paused_jobs)
break
@abstractmethod
def lookup_job(self, job_id):
"""
Returns a specific job, or ``None`` if it isn't found.
The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
the returned job to point to the scheduler and itself, respectively.
:param str|unicode job_id: identifier of the job
:rtype: Job
"""
@abstractmethod
def get_due_jobs(self, now):
"""
Returns the list of jobs that have ``next_run_time`` earlier than or equal to ``now``.
The returned jobs must be sorted by next run time (ascending).
:param datetime.datetime now: the current (timezone aware) datetime
:rtype: list[Job]
"""
@abstractmethod
def get_next_run_time(self):
"""
Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
there are no active jobs.
:rtype: datetime.datetime
"""
@abstractmethod
def get_all_jobs(self):
"""
Returns a list of all jobs in this job store.
The returned jobs should be sorted by next run time (ascending).
Paused jobs (next_run_time == None) should be sorted last.
The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
the returned jobs to point to the scheduler and itself, respectively.
:rtype: list[Job]
"""
@abstractmethod
def add_job(self, job):
"""
Adds the given job to this store.
:param Job job: the job to add
:raises ConflictingIdError: if there is another job in this store with the same ID
"""
@abstractmethod
def update_job(self, job):
"""
Replaces the job in the store with the given newer version.
:param Job job: the job to update
:raises JobLookupError: if the job does not exist
"""
@abstractmethod
def remove_job(self, job_id):
"""
Removes the given job from this store.
:param str|unicode job_id: identifier of the job
:raises JobLookupError: if the job does not exist
"""
@abstractmethod
def remove_all_jobs(self):
"""Removes all jobs from this store."""
def __repr__(self):
return f"<{self.__class__.__name__}>"

View file

@ -0,0 +1,170 @@
import pickle
from datetime import datetime, timezone
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
datetime_to_utc_timestamp,
maybe_ref,
utc_timestamp_to_datetime,
)
try:
from etcd3 import Etcd3Client
except ImportError as exc: # pragma: nocover
raise ImportError("EtcdJobStore requires etcd3 be installed") from exc
class EtcdJobStore(BaseJobStore):
"""
Stores jobs in etcd. Any leftover keyword arguments are directly passed to
etcd3's `etcd3.client
<https://python-etcd3.readthedocs.io/en/latest/readme.html>`_.
Plugin alias: ``etcd``
:param str path: path to store jobs in
:param client: a :class:`~etcd3.client.etcd3` instance to use instead of
providing connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to
``pickle.DEFAULT_PROTOCOL``
"""
def __init__(
self,
path="/apscheduler",
client=None,
close_connection_on_exit=False,
pickle_protocol=pickle.DEFAULT_PROTOCOL,
**connect_args,
):
super().__init__()
self.pickle_protocol = pickle_protocol
self.close_connection_on_exit = close_connection_on_exit
if not path:
raise ValueError('The "path" parameter must not be empty')
self.path = path
if client:
self.client = maybe_ref(client)
else:
self.client = Etcd3Client(**connect_args)
def lookup_job(self, job_id):
node_path = self.path + "/" + str(job_id)
try:
content, _ = self.client.get(node_path)
content = pickle.loads(content)
job = self._reconstitute_job(content["job_state"])
return job
except BaseException:
return None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
jobs = [
job_record["job"]
for job_record in self._get_jobs()
if job_record["next_run_time"] is not None
and job_record["next_run_time"] <= timestamp
]
return jobs
def get_next_run_time(self):
next_runs = [
job_record["next_run_time"]
for job_record in self._get_jobs()
if job_record["next_run_time"] is not None
]
return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
def get_all_jobs(self):
jobs = [job_record["job"] for job_record in self._get_jobs()]
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
node_path = self.path + "/" + str(job.id)
value = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": job.__getstate__(),
}
data = pickle.dumps(value, self.pickle_protocol)
status = self.client.put_if_not_exists(node_path, value=data)
if not status:
raise ConflictingIdError(job.id)
def update_job(self, job):
node_path = self.path + "/" + str(job.id)
changes = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": job.__getstate__(),
}
data = pickle.dumps(changes, self.pickle_protocol)
status, _ = self.client.transaction(
compare=[self.client.transactions.version(node_path) > 0],
success=[self.client.transactions.put(node_path, value=data)],
failure=[],
)
if not status:
raise JobLookupError(job.id)
def remove_job(self, job_id):
node_path = self.path + "/" + str(job_id)
status, _ = self.client.transaction(
compare=[self.client.transactions.version(node_path) > 0],
success=[self.client.transactions.delete(node_path)],
failure=[],
)
if not status:
raise JobLookupError(job_id)
def remove_all_jobs(self):
self.client.delete_prefix(self.path)
def shutdown(self):
self.client.close()
def _reconstitute_job(self, job_state):
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self):
jobs = []
failed_job_ids = []
all_ids = list(self.client.get_prefix(self.path))
for doc, _ in all_ids:
try:
content = pickle.loads(doc)
job_record = {
"next_run_time": content["next_run_time"],
"job": self._reconstitute_job(content["job_state"]),
}
jobs.append(job_record)
except BaseException:
content = pickle.loads(doc)
failed_id = content["job_state"]["id"]
failed_job_ids.append(failed_id)
self._logger.exception(
'Unable to restore job "%s" -- removing it', failed_id
)
if failed_job_ids:
for failed_id in failed_job_ids:
self.remove_job(failed_id)
paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
return sorted(
jobs,
key=lambda job_record: job_record["job"].next_run_time or paused_sort_key,
)
def __repr__(self):
return f"<{self.__class__.__name__} (client={self.client})>"

View file

@ -0,0 +1,106 @@
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import datetime_to_utc_timestamp
class MemoryJobStore(BaseJobStore):
"""
Stores jobs in an array in RAM. Provides no persistence support.
Plugin alias: ``memory``
"""
def __init__(self):
super().__init__()
# list of (job, timestamp), sorted by next_run_time and job id (ascending)
self._jobs = []
self._jobs_index = {} # id -> (job, timestamp) lookup table
def lookup_job(self, job_id):
return self._jobs_index.get(job_id, (None, None))[0]
def get_due_jobs(self, now):
now_timestamp = datetime_to_utc_timestamp(now)
pending = []
for job, timestamp in self._jobs:
if timestamp is None or timestamp > now_timestamp:
break
pending.append(job)
return pending
def get_next_run_time(self):
return self._jobs[0][0].next_run_time if self._jobs else None
def get_all_jobs(self):
return [j[0] for j in self._jobs]
def add_job(self, job):
if job.id in self._jobs_index:
raise ConflictingIdError(job.id)
timestamp = datetime_to_utc_timestamp(job.next_run_time)
index = self._get_job_index(timestamp, job.id)
self._jobs.insert(index, (job, timestamp))
self._jobs_index[job.id] = (job, timestamp)
def update_job(self, job):
old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
if old_job is None:
raise JobLookupError(job.id)
# If the next run time has not changed, simply replace the job in its present index.
# Otherwise, reinsert the job to the list to preserve the ordering.
old_index = self._get_job_index(old_timestamp, old_job.id)
new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
if old_timestamp == new_timestamp:
self._jobs[old_index] = (job, new_timestamp)
else:
del self._jobs[old_index]
new_index = self._get_job_index(new_timestamp, job.id)
self._jobs.insert(new_index, (job, new_timestamp))
self._jobs_index[old_job.id] = (job, new_timestamp)
def remove_job(self, job_id):
job, timestamp = self._jobs_index.get(job_id, (None, None))
if job is None:
raise JobLookupError(job_id)
index = self._get_job_index(timestamp, job_id)
del self._jobs[index]
del self._jobs_index[job.id]
def remove_all_jobs(self):
self._jobs = []
self._jobs_index = {}
def shutdown(self):
self.remove_all_jobs()
def _get_job_index(self, timestamp, job_id):
"""
Returns the index of the given job, or if it's not found, the index where the job should be
inserted based on the given timestamp.
:type timestamp: int
:type job_id: str
"""
lo, hi = 0, len(self._jobs)
timestamp = float("inf") if timestamp is None else timestamp
while lo < hi:
mid = (lo + hi) // 2
mid_job, mid_timestamp = self._jobs[mid]
mid_timestamp = float("inf") if mid_timestamp is None else mid_timestamp
if mid_timestamp > timestamp:
hi = mid
elif mid_timestamp < timestamp:
lo = mid + 1
elif mid_job.id > job_id:
hi = mid
elif mid_job.id < job_id:
lo = mid + 1
else:
return mid
return lo
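# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The bisection in _get_job_index() above behaves like bisect_left() over
# (timestamp, job id) keys, with a missing timestamp treated as +infinity so that
# paused jobs sort last. The helper below demonstrates that equivalence on plain
# tuples; it is only an illustration and is not used by the store itself.
def _example_job_index(entries, timestamp, job_id):
    """entries: list of (timestamp_or_None, job_id) tuples, kept in sorted order."""
    from bisect import bisect_left

    def key(ts, jid):
        return (float("inf") if ts is None else ts, jid)

    keys = [key(ts, jid) for ts, jid in entries]
    return bisect_left(keys, key(timestamp, job_id))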

View file

@ -0,0 +1,158 @@
import pickle
import warnings
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
datetime_to_utc_timestamp,
maybe_ref,
utc_timestamp_to_datetime,
)
try:
from bson.binary import Binary
from pymongo import ASCENDING, MongoClient
from pymongo.errors import DuplicateKeyError
except ImportError as exc: # pragma: nocover
raise ImportError("MongoDBJobStore requires PyMongo installed") from exc
class MongoDBJobStore(BaseJobStore):
"""
Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
pymongo's `MongoClient
<http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.
Plugin alias: ``mongodb``
:param str database: database to store jobs in
:param str collection: collection to store jobs in
:param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
providing connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(
self,
database="apscheduler",
collection="jobs",
client=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
**connect_args,
):
super().__init__()
self.pickle_protocol = pickle_protocol
if not database:
raise ValueError('The "database" parameter must not be empty')
if not collection:
raise ValueError('The "collection" parameter must not be empty')
if client:
self.client = maybe_ref(client)
else:
connect_args.setdefault("w", 1)
self.client = MongoClient(**connect_args)
self.collection = self.client[database][collection]
def start(self, scheduler, alias):
super().start(scheduler, alias)
self.collection.create_index("next_run_time", sparse=True)
@property
def connection(self):
warnings.warn(
'The "connection" member is deprecated -- use "client" instead',
DeprecationWarning,
)
return self.client
def lookup_job(self, job_id):
document = self.collection.find_one(job_id, ["job_state"])
return self._reconstitute_job(document["job_state"]) if document else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
return self._get_jobs({"next_run_time": {"$lte": timestamp}})
def get_next_run_time(self):
document = self.collection.find_one(
{"next_run_time": {"$ne": None}},
projection=["next_run_time"],
sort=[("next_run_time", ASCENDING)],
)
return (
utc_timestamp_to_datetime(document["next_run_time"]) if document else None
)
def get_all_jobs(self):
jobs = self._get_jobs({})
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
try:
self.collection.insert_one(
{
"_id": job.id,
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": Binary(
pickle.dumps(job.__getstate__(), self.pickle_protocol)
),
}
)
except DuplicateKeyError:
raise ConflictingIdError(job.id)
def update_job(self, job):
changes = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol)),
}
result = self.collection.update_one({"_id": job.id}, {"$set": changes})
if result and result.matched_count == 0:
raise JobLookupError(job.id)
def remove_job(self, job_id):
result = self.collection.delete_one({"_id": job_id})
if result and result.deleted_count == 0:
raise JobLookupError(job_id)
def remove_all_jobs(self):
self.collection.delete_many({})
def shutdown(self):
self.client.close()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, conditions):
jobs = []
failed_job_ids = []
for document in self.collection.find(
conditions, ["_id", "job_state"], sort=[("next_run_time", ASCENDING)]
):
try:
jobs.append(self._reconstitute_job(document["job_state"]))
except BaseException:
self._logger.exception(
'Unable to restore job "%s" -- removing it', document["_id"]
)
failed_job_ids.append(document["_id"])
# Remove all the jobs we failed to restore
if failed_job_ids:
self.collection.delete_many({"_id": {"$in": failed_job_ids}})
return jobs
def __repr__(self):
return f"<{self.__class__.__name__} (client={self.client})>"

View file

@ -0,0 +1,160 @@
import pickle
from datetime import datetime, timezone
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
try:
from redis import Redis
except ImportError as exc: # pragma: nocover
raise ImportError("RedisJobStore requires redis installed") from exc
class RedisJobStore(BaseJobStore):
"""
Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
:class:`~redis.StrictRedis`.
Plugin alias: ``redis``
:param int db: the database number to store jobs in
:param str jobs_key: key to store jobs in
:param str run_times_key: key to store the jobs' run times in
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(
self,
db=0,
jobs_key="apscheduler.jobs",
run_times_key="apscheduler.run_times",
pickle_protocol=pickle.HIGHEST_PROTOCOL,
**connect_args,
):
super().__init__()
if db is None:
raise ValueError('The "db" parameter must not be empty')
if not jobs_key:
raise ValueError('The "jobs_key" parameter must not be empty')
if not run_times_key:
raise ValueError('The "run_times_key" parameter must not be empty')
self.pickle_protocol = pickle_protocol
self.jobs_key = jobs_key
self.run_times_key = run_times_key
self.redis = Redis(db=int(db), **connect_args)
def lookup_job(self, job_id):
job_state = self.redis.hget(self.jobs_key, job_id)
return self._reconstitute_job(job_state) if job_state else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
if job_ids:
job_states = self.redis.hmget(self.jobs_key, *job_ids)
return self._reconstitute_jobs(zip(job_ids, job_states))
return []
def get_next_run_time(self):
next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
if next_run_time:
return utc_timestamp_to_datetime(next_run_time[0][1])
def get_all_jobs(self):
job_states = self.redis.hgetall(self.jobs_key)
jobs = self._reconstitute_jobs(job_states.items())
paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
def add_job(self, job):
if self.redis.hexists(self.jobs_key, job.id):
raise ConflictingIdError(job.id)
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.hset(
self.jobs_key,
job.id,
pickle.dumps(job.__getstate__(), self.pickle_protocol),
)
if job.next_run_time:
pipe.zadd(
self.run_times_key,
{job.id: datetime_to_utc_timestamp(job.next_run_time)},
)
pipe.execute()
def update_job(self, job):
if not self.redis.hexists(self.jobs_key, job.id):
raise JobLookupError(job.id)
with self.redis.pipeline() as pipe:
pipe.hset(
self.jobs_key,
job.id,
pickle.dumps(job.__getstate__(), self.pickle_protocol),
)
if job.next_run_time:
pipe.zadd(
self.run_times_key,
{job.id: datetime_to_utc_timestamp(job.next_run_time)},
)
else:
pipe.zrem(self.run_times_key, job.id)
pipe.execute()
def remove_job(self, job_id):
if not self.redis.hexists(self.jobs_key, job_id):
raise JobLookupError(job_id)
with self.redis.pipeline() as pipe:
pipe.hdel(self.jobs_key, job_id)
pipe.zrem(self.run_times_key, job_id)
pipe.execute()
def remove_all_jobs(self):
with self.redis.pipeline() as pipe:
pipe.delete(self.jobs_key)
pipe.delete(self.run_times_key)
pipe.execute()
def shutdown(self):
self.redis.connection_pool.disconnect()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _reconstitute_jobs(self, job_states):
jobs = []
failed_job_ids = []
for job_id, job_state in job_states:
try:
jobs.append(self._reconstitute_job(job_state))
except BaseException:
self._logger.exception(
'Unable to restore job "%s" -- removing it', job_id
)
failed_job_ids.append(job_id)
# Remove all the jobs we failed to restore
if failed_job_ids:
with self.redis.pipeline() as pipe:
pipe.hdel(self.jobs_key, *failed_job_ids)
pipe.zrem(self.run_times_key, *failed_job_ids)
pipe.execute()
return jobs
def __repr__(self):
return f"<{self.__class__.__name__}>"

View file

@ -0,0 +1,173 @@
import pickle
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
datetime_to_utc_timestamp,
maybe_ref,
utc_timestamp_to_datetime,
)
try:
from rethinkdb import RethinkDB
except ImportError as exc: # pragma: nocover
raise ImportError("RethinkDBJobStore requires rethinkdb installed") from exc
class RethinkDBJobStore(BaseJobStore):
"""
Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
rethinkdb's `RethinkdbClient <http://www.rethinkdb.com/api/#connect>`_.
Plugin alias: ``rethinkdb``
:param str database: database to store jobs in
:param str table: name of the table to store jobs in
:param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(
self,
database="apscheduler",
table="jobs",
client=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
**connect_args,
):
super().__init__()
if not database:
raise ValueError('The "database" parameter must not be empty')
if not table:
raise ValueError('The "table" parameter must not be empty')
self.database = database
self.table_name = table
self.table = None
self.client = client
self.pickle_protocol = pickle_protocol
self.connect_args = connect_args
self.r = RethinkDB()
self.conn = None
def start(self, scheduler, alias):
super().start(scheduler, alias)
if self.client:
self.conn = maybe_ref(self.client)
else:
self.conn = self.r.connect(db=self.database, **self.connect_args)
if self.database not in self.r.db_list().run(self.conn):
self.r.db_create(self.database).run(self.conn)
if self.table_name not in self.r.table_list().run(self.conn):
self.r.table_create(self.table_name).run(self.conn)
if "next_run_time" not in self.r.table(self.table_name).index_list().run(
self.conn
):
self.r.table(self.table_name).index_create("next_run_time").run(self.conn)
self.table = self.r.db(self.database).table(self.table_name)
def lookup_job(self, job_id):
results = list(self.table.get_all(job_id).pluck("job_state").run(self.conn))
return self._reconstitute_job(results[0]["job_state"]) if results else None
def get_due_jobs(self, now):
return self._get_jobs(
self.r.row["next_run_time"] <= datetime_to_utc_timestamp(now)
)
def get_next_run_time(self):
results = list(
self.table.filter(self.r.row["next_run_time"] != None)
.order_by(self.r.asc("next_run_time"))
.map(lambda x: x["next_run_time"])
.limit(1)
.run(self.conn)
)
return utc_timestamp_to_datetime(results[0]) if results else None
def get_all_jobs(self):
jobs = self._get_jobs()
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
job_dict = {
"id": job.id,
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": self.r.binary(
pickle.dumps(job.__getstate__(), self.pickle_protocol)
),
}
results = self.table.insert(job_dict).run(self.conn)
if results["errors"] > 0:
raise ConflictingIdError(job.id)
def update_job(self, job):
changes = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": self.r.binary(
pickle.dumps(job.__getstate__(), self.pickle_protocol)
),
}
results = self.table.get_all(job.id).update(changes).run(self.conn)
skipped = False in map(lambda x: results[x] == 0, results.keys())
if results["skipped"] > 0 or results["errors"] > 0 or not skipped:
raise JobLookupError(job.id)
def remove_job(self, job_id):
results = self.table.get_all(job_id).delete().run(self.conn)
if results["deleted"] + results["skipped"] != 1:
raise JobLookupError(job_id)
def remove_all_jobs(self):
self.table.delete().run(self.conn)
def shutdown(self):
self.conn.close()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, predicate=None):
jobs = []
failed_job_ids = []
query = (
self.table.filter(self.r.row["next_run_time"] != None).filter(predicate)
if predicate
else self.table
)
query = query.order_by("next_run_time", "id").pluck("id", "job_state")
for document in query.run(self.conn):
try:
jobs.append(self._reconstitute_job(document["job_state"]))
except Exception:
self._logger.exception(
'Unable to restore job "%s" -- removing it', document["id"]
)
failed_job_ids.append(document["id"])
# Remove all the jobs we failed to restore
if failed_job_ids:
self.r.expr(failed_job_ids).for_each(
lambda job_id: self.table.get_all(job_id).delete()
).run(self.conn)
return jobs
def __repr__(self):
connection = self.conn
return f"<{self.__class__.__name__} (connection={connection})>"

View file

@ -0,0 +1,194 @@
import pickle
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
datetime_to_utc_timestamp,
maybe_ref,
utc_timestamp_to_datetime,
)
try:
from sqlalchemy import (
Column,
Float,
LargeBinary,
MetaData,
Table,
Unicode,
and_,
create_engine,
select,
)
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql.expression import null
except ImportError as exc: # pragma: nocover
raise ImportError("SQLAlchemyJobStore requires SQLAlchemy installed") from exc
class SQLAlchemyJobStore(BaseJobStore):
"""
Stores jobs in a database table using SQLAlchemy.
The table will be created if it doesn't exist in the database.
Plugin alias: ``sqlalchemy``
:param str url: connection string (see
:ref:`SQLAlchemy documentation <sqlalchemy:database_urls>` on this)
:param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
new one based on ``url``
:param str tablename: name of the table to store jobs in
:param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
new one
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
:param str tableschema: name of the (existing) schema in the target database where the table
should be
:param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
(ignored if ``engine`` is given)
"""
def __init__(
self,
url=None,
engine=None,
tablename="apscheduler_jobs",
metadata=None,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
tableschema=None,
engine_options=None,
):
super().__init__()
self.pickle_protocol = pickle_protocol
metadata = maybe_ref(metadata) or MetaData()
if engine:
self.engine = maybe_ref(engine)
elif url:
self.engine = create_engine(url, **(engine_options or {}))
else:
raise ValueError('Need either "engine" or "url" defined')
# 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
# 25 = precision that translates to an 8-byte float
self.jobs_t = Table(
tablename,
metadata,
Column("id", Unicode(191), primary_key=True),
Column("next_run_time", Float(25), index=True),
Column("job_state", LargeBinary, nullable=False),
schema=tableschema,
)
def start(self, scheduler, alias):
super().start(scheduler, alias)
self.jobs_t.create(self.engine, True)
def lookup_job(self, job_id):
selectable = select(self.jobs_t.c.job_state).where(self.jobs_t.c.id == job_id)
with self.engine.begin() as connection:
job_state = connection.execute(selectable).scalar()
return self._reconstitute_job(job_state) if job_state else None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
def get_next_run_time(self):
selectable = (
select(self.jobs_t.c.next_run_time)
.where(self.jobs_t.c.next_run_time != null())
.order_by(self.jobs_t.c.next_run_time)
.limit(1)
)
with self.engine.begin() as connection:
next_run_time = connection.execute(selectable).scalar()
return utc_timestamp_to_datetime(next_run_time)
def get_all_jobs(self):
jobs = self._get_jobs()
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
insert = self.jobs_t.insert().values(
**{
"id": job.id,
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
}
)
with self.engine.begin() as connection:
try:
connection.execute(insert)
except IntegrityError:
raise ConflictingIdError(job.id)
def update_job(self, job):
update = (
self.jobs_t.update()
.values(
**{
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": pickle.dumps(job.__getstate__(), self.pickle_protocol),
}
)
.where(self.jobs_t.c.id == job.id)
)
with self.engine.begin() as connection:
result = connection.execute(update)
if result.rowcount == 0:
raise JobLookupError(job.id)
def remove_job(self, job_id):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
with self.engine.begin() as connection:
result = connection.execute(delete)
if result.rowcount == 0:
raise JobLookupError(job_id)
def remove_all_jobs(self):
delete = self.jobs_t.delete()
with self.engine.begin() as connection:
connection.execute(delete)
def shutdown(self):
self.engine.dispose()
def _reconstitute_job(self, job_state):
job_state = pickle.loads(job_state)
job_state["jobstore"] = self
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self, *conditions):
jobs = []
selectable = select(self.jobs_t.c.id, self.jobs_t.c.job_state).order_by(
self.jobs_t.c.next_run_time
)
selectable = selectable.where(and_(*conditions)) if conditions else selectable
failed_job_ids = set()
with self.engine.begin() as connection:
for row in connection.execute(selectable):
try:
jobs.append(self._reconstitute_job(row.job_state))
except BaseException:
self._logger.exception(
'Unable to restore job "%s" -- removing it', row.id
)
failed_job_ids.add(row.id)
# Remove all the jobs we failed to restore
if failed_job_ids:
delete = self.jobs_t.delete().where(
self.jobs_t.c.id.in_(failed_job_ids)
)
connection.execute(delete)
return jobs
def __repr__(self):
return f"<{self.__class__.__name__} (url={self.engine.url})>"

View file

@ -0,0 +1,197 @@
import pickle
from datetime import datetime, timezone
from kazoo.exceptions import NodeExistsError, NoNodeError
from apscheduler.job import Job
from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
from apscheduler.util import (
datetime_to_utc_timestamp,
maybe_ref,
utc_timestamp_to_datetime,
)
try:
from kazoo.client import KazooClient
except ImportError as exc: # pragma: nocover
raise ImportError("ZooKeeperJobStore requires Kazoo installed") from exc
class ZooKeeperJobStore(BaseJobStore):
"""
Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
kazoo's `KazooClient
<http://kazoo.readthedocs.io/en/latest/api/client.html>`_.
Plugin alias: ``zookeeper``
:param str path: path to store jobs in
:param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
providing connection arguments
:param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
highest available
"""
def __init__(
self,
path="/apscheduler",
client=None,
close_connection_on_exit=False,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
**connect_args,
):
super().__init__()
self.pickle_protocol = pickle_protocol
self.close_connection_on_exit = close_connection_on_exit
if not path:
raise ValueError('The "path" parameter must not be empty')
self.path = path
if client:
self.client = maybe_ref(client)
else:
self.client = KazooClient(**connect_args)
self._ensured_path = False
def _ensure_paths(self):
if not self._ensured_path:
self.client.ensure_path(self.path)
self._ensured_path = True
def start(self, scheduler, alias):
super().start(scheduler, alias)
if not self.client.connected:
self.client.start()
def lookup_job(self, job_id):
self._ensure_paths()
node_path = self.path + "/" + str(job_id)
try:
content, _ = self.client.get(node_path)
doc = pickle.loads(content)
job = self._reconstitute_job(doc["job_state"])
return job
except BaseException:
return None
def get_due_jobs(self, now):
timestamp = datetime_to_utc_timestamp(now)
jobs = [
job_def["job"]
for job_def in self._get_jobs()
if job_def["next_run_time"] is not None
and job_def["next_run_time"] <= timestamp
]
return jobs
def get_next_run_time(self):
next_runs = [
job_def["next_run_time"]
for job_def in self._get_jobs()
if job_def["next_run_time"] is not None
]
return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
def get_all_jobs(self):
jobs = [job_def["job"] for job_def in self._get_jobs()]
self._fix_paused_jobs_sorting(jobs)
return jobs
def add_job(self, job):
self._ensure_paths()
node_path = self.path + "/" + str(job.id)
value = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": job.__getstate__(),
}
data = pickle.dumps(value, self.pickle_protocol)
try:
self.client.create(node_path, value=data)
except NodeExistsError:
raise ConflictingIdError(job.id)
def update_job(self, job):
self._ensure_paths()
node_path = self.path + "/" + str(job.id)
changes = {
"next_run_time": datetime_to_utc_timestamp(job.next_run_time),
"job_state": job.__getstate__(),
}
data = pickle.dumps(changes, self.pickle_protocol)
try:
self.client.set(node_path, value=data)
except NoNodeError:
raise JobLookupError(job.id)
def remove_job(self, job_id):
self._ensure_paths()
node_path = self.path + "/" + str(job_id)
try:
self.client.delete(node_path)
except NoNodeError:
raise JobLookupError(job_id)
def remove_all_jobs(self):
try:
self.client.delete(self.path, recursive=True)
except NoNodeError:
pass
self._ensured_path = False
def shutdown(self):
if self.close_connection_on_exit:
self.client.stop()
self.client.close()
def _reconstitute_job(self, job_state):
job = Job.__new__(Job)
job.__setstate__(job_state)
job._scheduler = self._scheduler
job._jobstore_alias = self._alias
return job
def _get_jobs(self):
self._ensure_paths()
jobs = []
failed_job_ids = []
all_ids = self.client.get_children(self.path)
for node_name in all_ids:
try:
node_path = self.path + "/" + node_name
content, stat = self.client.get(node_path)
doc = pickle.loads(content)
job_def = {
"job_id": node_name,
"next_run_time": doc["next_run_time"]
if doc["next_run_time"]
else None,
"job_state": doc["job_state"],
"job": self._reconstitute_job(doc["job_state"]),
"creation_time": _.ctime,
}
jobs.append(job_def)
except BaseException:
self._logger.exception(
'Unable to restore job "%s" -- removing it', node_name
)
failed_job_ids.append(node_name)
# Remove all the jobs we failed to restore
if failed_job_ids:
for failed_id in failed_job_ids:
self.remove_job(failed_id)
paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
return sorted(
jobs,
key=lambda job_def: (
job_def["job"].next_run_time or paused_sort_key,
job_def["creation_time"],
),
)
def __repr__(self):
return f"<{self.__class__.__name__} (client={self.client})>"

View file

@ -0,0 +1,12 @@
class SchedulerAlreadyRunningError(Exception):
"""Raised when attempting to start or configure the scheduler when it's already running."""
def __str__(self):
return "Scheduler is already running"
class SchedulerNotRunningError(Exception):
"""Raised when attempting to shutdown the scheduler when it's not running."""
def __str__(self):
return "Scheduler is not running"

View file

@ -0,0 +1,67 @@
import asyncio
from functools import partial, wraps
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
def run_in_event_loop(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
wrapped = partial(func, self, *args, **kwargs)
self._eventloop.call_soon_threadsafe(wrapped)
return wrapper
class AsyncIOScheduler(BaseScheduler):
"""
A scheduler that runs on an asyncio (:pep:`3156`) event loop.
The default executor can run jobs based on native coroutines (``async def``).
Extra options:
============== =============================================================
``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
============== =============================================================
"""
_eventloop = None
_timeout = None
def start(self, paused=False):
if not self._eventloop:
self._eventloop = asyncio.get_running_loop()
super().start(paused)
@run_in_event_loop
def shutdown(self, wait=True):
super().shutdown(wait)
self._stop_timer()
def _configure(self, config):
self._eventloop = maybe_ref(config.pop("event_loop", None))
super()._configure(config)
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)
def _stop_timer(self):
if self._timeout:
self._timeout.cancel()
del self._timeout
@run_in_event_loop
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)
def _create_default_executor(self):
from apscheduler.executors.asyncio import AsyncIOExecutor
return AsyncIOExecutor()
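# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Running the scheduler inside an asyncio program: start() picks up the running event
# loop as implemented above, and the default executor runs the coroutine job. The
# job, interval and 30-second lifetime are assumptions for this example.
def _example_asyncio_scheduler_usage():
    async def tick():
        print("tick")

    async def main():
        scheduler = AsyncIOScheduler()
        scheduler.add_job(tick, "interval", seconds=5)
        scheduler.start()
        await asyncio.sleep(30)
        scheduler.shutdown()

    asyncio.run(main())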

View file

@ -0,0 +1,42 @@
from threading import Event, Thread
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.util import asbool
class BackgroundScheduler(BlockingScheduler):
"""
A scheduler that runs in the background using a separate thread
(:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).
Extra options:
========== =============================================================================
``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
`the documentation
<https://docs.python.org/3.4/library/threading.html#thread-objects>`_
for further details)
========== =============================================================================
"""
_thread = None
def _configure(self, config):
self._daemon = asbool(config.pop("daemon", True))
super()._configure(config)
def start(self, *args, **kwargs):
if self._event is None or self._event.is_set():
self._event = Event()
BaseScheduler.start(self, *args, **kwargs)
self._thread = Thread(
target=self._main_loop, name="APScheduler", daemon=self._daemon
)
self._thread.start()
def shutdown(self, *args, **kwargs):
super().shutdown(*args, **kwargs)
self._thread.join()
del self._thread
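# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# start() returns immediately and the jobs run in the "APScheduler" thread created
# above, so the main program can keep working. The job, interval and sleep are
# assumptions for this example.
def _example_background_scheduler_usage():
    import time

    def tick():
        print("tick")

    scheduler = BackgroundScheduler(daemon=True)
    scheduler.add_job(tick, "interval", seconds=2)
    scheduler.start()   # returns immediately
    time.sleep(10)      # the main thread does its own work here
    scheduler.shutdown()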

File diff suppressed because it is too large

View file

@ -0,0 +1,33 @@
from threading import TIMEOUT_MAX, Event
from apscheduler.schedulers.base import STATE_STOPPED, BaseScheduler
class BlockingScheduler(BaseScheduler):
"""
A scheduler that runs in the foreground
(:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
"""
_event = None
def start(self, *args, **kwargs):
if self._event is None or self._event.is_set():
self._event = Event()
super().start(*args, **kwargs)
self._main_loop()
def shutdown(self, wait=True):
super().shutdown(wait)
self._event.set()
def _main_loop(self):
wait_seconds = TIMEOUT_MAX
while self.state != STATE_STOPPED:
self._event.wait(wait_seconds)
self._event.clear()
wait_seconds = self._process_jobs()
def wakeup(self):
self._event.set()
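# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# start() blocks in _main_loop() above until shutdown() is called or the process is
# interrupted. The job and interval are assumptions for this example.
def _example_blocking_scheduler_usage():
    def tick():
        print("tick")

    scheduler = BlockingScheduler()
    scheduler.add_job(tick, "interval", seconds=2)
    try:
        scheduler.start()  # blocks until shutdown or Ctrl+C
    except (KeyboardInterrupt, SystemExit):
        pass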

View file

@ -0,0 +1,34 @@
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
try:
import gevent
from gevent.event import Event
from gevent.lock import RLock
except ImportError as exc: # pragma: nocover
raise ImportError("GeventScheduler requires gevent installed") from exc
class GeventScheduler(BlockingScheduler):
"""A scheduler that runs as a Gevent greenlet."""
_greenlet = None
def start(self, *args, **kwargs):
self._event = Event()
BaseScheduler.start(self, *args, **kwargs)
self._greenlet = gevent.spawn(self._main_loop)
return self._greenlet
def shutdown(self, *args, **kwargs):
super().shutdown(*args, **kwargs)
self._greenlet.join()
del self._greenlet
def _create_lock(self):
return RLock()
def _create_default_executor(self):
from apscheduler.executors.gevent import GeventExecutor
return GeventExecutor()

View file

@ -0,0 +1,44 @@
from importlib import import_module
from itertools import product
from apscheduler.schedulers.base import BaseScheduler
for version, pkgname in product(range(6, 1, -1), ("PySide", "PyQt")):
try:
qtcore = import_module(pkgname + str(version) + ".QtCore")
except ImportError:
pass
else:
QTimer = qtcore.QTimer
break
else:
raise ImportError("QtScheduler requires either PySide/PyQt (v6 to v2) installed")
class QtScheduler(BaseScheduler):
"""A scheduler that runs in a Qt event loop."""
_timer = None
def shutdown(self, *args, **kwargs):
super().shutdown(*args, **kwargs)
self._stop_timer()
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
wait_time = min(int(wait_seconds * 1000), 2147483647)
self._timer = QTimer.singleShot(wait_time, self._process_jobs)
def _stop_timer(self):
if self._timer:
if self._timer.isActive():
self._timer.stop()
del self._timer
def wakeup(self):
self._start_timer(0)
def _process_jobs(self):
wait_seconds = super()._process_jobs()
self._start_timer(wait_seconds)

View file

@ -0,0 +1,65 @@
from datetime import timedelta
from functools import wraps
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
from tornado.ioloop import IOLoop
except ImportError as exc: # pragma: nocover
raise ImportError("TornadoScheduler requires tornado installed") from exc
def run_in_ioloop(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._ioloop.add_callback(func, self, *args, **kwargs)
return wrapper
class TornadoScheduler(BaseScheduler):
"""
A scheduler that runs on a Tornado IOLoop.
The default executor can run jobs based on native coroutines (``async def``).
=========== ===============================================================
``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
=========== ===============================================================
"""
_ioloop = None
_timeout = None
@run_in_ioloop
def shutdown(self, wait=True):
super().shutdown(wait)
self._stop_timer()
def _configure(self, config):
self._ioloop = maybe_ref(config.pop("io_loop", None)) or IOLoop.current()
super()._configure(config)
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._timeout = self._ioloop.add_timeout(
timedelta(seconds=wait_seconds), self.wakeup
)
def _stop_timer(self):
if self._timeout:
self._ioloop.remove_timeout(self._timeout)
del self._timeout
def _create_default_executor(self):
from apscheduler.executors.tornado import TornadoExecutor
return TornadoExecutor()
@run_in_ioloop
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)

View file

@ -0,0 +1,62 @@
from functools import wraps
from apscheduler.schedulers.base import BaseScheduler
from apscheduler.util import maybe_ref
try:
from twisted.internet import reactor as default_reactor
except ImportError as exc: # pragma: nocover
raise ImportError("TwistedScheduler requires Twisted installed") from exc
def run_in_reactor(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
self._reactor.callFromThread(func, self, *args, **kwargs)
return wrapper
class TwistedScheduler(BaseScheduler):
"""
A scheduler that runs on a Twisted reactor.
Extra options:
=========== ========================================================
``reactor`` Reactor instance to use (defaults to the global reactor)
=========== ========================================================
"""
_reactor = None
_delayedcall = None
def _configure(self, config):
self._reactor = maybe_ref(config.pop("reactor", default_reactor))
super()._configure(config)
@run_in_reactor
def shutdown(self, wait=True):
super().shutdown(wait)
self._stop_timer()
def _start_timer(self, wait_seconds):
self._stop_timer()
if wait_seconds is not None:
self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)
def _stop_timer(self):
if self._delayedcall and self._delayedcall.active():
self._delayedcall.cancel()
del self._delayedcall
@run_in_reactor
def wakeup(self):
self._stop_timer()
wait_seconds = self._process_jobs()
self._start_timer(wait_seconds)
def _create_default_executor(self):
from apscheduler.executors.twisted import TwistedExecutor
return TwistedExecutor()

View file

@ -0,0 +1,35 @@
import random
from abc import ABCMeta, abstractmethod
from datetime import timedelta
class BaseTrigger(metaclass=ABCMeta):
"""Abstract base class that defines the interface that every trigger must implement."""
__slots__ = ()
@abstractmethod
def get_next_fire_time(self, previous_fire_time, now):
"""
Returns the next datetime to fire on. If no such datetime can be calculated, returns
``None``.
:param datetime.datetime previous_fire_time: the previous time the trigger was fired
:param datetime.datetime now: current datetime
"""
def _apply_jitter(self, next_fire_time, jitter, now):
"""
Randomize ``next_fire_time`` by adding a random value (the jitter).
:param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
``None``, returns ``None``.
:param int|None jitter: maximum number of seconds to add to ``next_fire_time``
(if ``None`` or ``0``, returns ``next_fire_time``)
:param datetime.datetime now: current datetime
:return datetime.datetime|None: next fire time with a jitter.
"""
if next_fire_time is None or not jitter:
return next_fire_time
return next_fire_time + timedelta(seconds=random.uniform(0, jitter))
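# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal concrete trigger built on the interface above: it fires a fixed number of
# seconds after the previous fire time (or after `now` on the first run) and reuses
# _apply_jitter() for optional jitter. The class name and parameters are assumptions
# for this example only.
class FixedDelayTrigger(BaseTrigger):
    __slots__ = ("delay", "jitter")

    def __init__(self, seconds, jitter=None):
        self.delay = timedelta(seconds=seconds)
        self.jitter = jitter

    def get_next_fire_time(self, previous_fire_time, now):
        base = previous_fire_time + self.delay if previous_fire_time else now
        return self._apply_jitter(base, self.jitter, now)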

View file

@ -0,0 +1,186 @@
from __future__ import annotations
from datetime import date, datetime, time, timedelta, tzinfo
from typing import Any
from tzlocal import get_localzone
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import (
asdate,
astimezone,
timezone_repr,
)
class CalendarIntervalTrigger(BaseTrigger):
"""
Runs the task on specified calendar-based intervals always at the same exact time of
day.
When calculating the next date, the ``years`` and ``months`` parameters are first
added to the previous date while keeping the day of the month constant. This is
repeated until the resulting date is valid. After that, the ``weeks`` and ``days``
parameters are added to that date. Finally, the date is combined with the given time
(hour, minute, second) to form the final datetime.
This means that if the ``days`` or ``weeks`` parameters are not used, the task will
always be executed on the same day of the month at the same wall clock time,
assuming the date and time are valid.
If the resulting datetime is invalid due to a daylight saving forward shift, the
date is discarded and the process moves on to the next date. If instead the datetime
is ambiguous due to a backward DST shift, the earlier of the two resulting datetimes
is used.
If no previous run time is specified when requesting a new run time (like when
starting for the first time or resuming after being paused), ``start_date`` is used
as a reference and the next valid datetime equal to or later than the current time
will be returned. Otherwise, the next valid datetime starting from the previous run
time is returned, even if it's in the past.
.. warning:: Be wary of setting a start date near the end of the month (the 29th to the 31st)
if you have ``months`` specified in your interval, as this will skip the months
when those days do not exist. Likewise, setting the start date on the leap day
(February 29th) and having ``years`` defined may cause some years to be skipped.
Users are also discouraged from using a time inside the target timezone's DST
switching period (typically around 2 am) since a date could either be skipped or
repeated due to the specified wall clock time either occurring twice or not at
all.
:param years: number of years to wait
:param months: number of months to wait
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hour: hour to run the task at
:param minute: minute to run the task at
:param second: second to run the task at
:param start_date: first date to trigger on (defaults to current date if omitted)
:param end_date: latest possible date to trigger on
:param timezone: time zone to use for calculating the next fire time (defaults
to scheduler timezone if created via the scheduler, otherwise the local time
zone)
:param jitter: delay the job execution by ``jitter`` seconds at most
"""
__slots__ = (
"years",
"months",
"weeks",
"days",
"start_date",
"end_date",
"timezone",
"jitter",
"_time",
)
def __init__(
self,
*,
years: int = 0,
months: int = 0,
weeks: int = 0,
days: int = 0,
hour: int = 0,
minute: int = 0,
second: int = 0,
start_date: date | str | None = None,
end_date: date | str | None = None,
timezone: str | tzinfo | None = None,
jitter: int | None = None,
):
if timezone:
self.timezone = astimezone(timezone)
else:
self.timezone = astimezone(get_localzone())
self.years = years
self.months = months
self.weeks = weeks
self.days = days
self.start_date = asdate(start_date) or date.today()
self.end_date = asdate(end_date)
self.jitter = jitter
self._time = time(hour, minute, second, tzinfo=self.timezone)
if self.years == self.months == self.weeks == self.days == 0:
raise ValueError("interval must be at least 1 day long")
if self.end_date and self.start_date > self.end_date:
raise ValueError("end_date cannot be earlier than start_date")
def get_next_fire_time(
self, previous_fire_time: datetime | None, now: datetime
) -> datetime | None:
while True:
if previous_fire_time:
year, month = previous_fire_time.year, previous_fire_time.month
while True:
month += self.months
year += self.years + (month - 1) // 12
month = (month - 1) % 12 + 1
try:
next_date = date(year, month, previous_fire_time.day)
except ValueError:
pass # Nonexistent date
else:
next_date += timedelta(self.days + self.weeks * 7)
break
else:
next_date = self.start_date
# Don't return any date past end_date
if self.end_date and next_date > self.end_date:
return None
# Combine the date with the designated time and normalize the result
timestamp = datetime.combine(next_date, self._time).timestamp()
next_time = datetime.fromtimestamp(timestamp, self.timezone)
# Check if the time is off due to normalization and a forward DST shift
if next_time.timetz() != self._time:
previous_fire_time = next_time.date()
else:
return self._apply_jitter(next_time, self.jitter, now)
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"interval": [self.years, self.months, self.weeks, self.days],
"time": [self._time.hour, self._time.minute, self._time.second],
"start_date": self.start_date,
"end_date": self.end_date,
"timezone": self.timezone,
"jitter": self.jitter,
}
def __setstate__(self, state: dict[str, Any]) -> None:
if state.get("version", 1) > 1:
raise ValueError(
f"Got serialized data for version {state['version']} of "
f"{self.__class__.__name__}, but only versions up to 1 can be handled"
)
self.years, self.months, self.weeks, self.days = state["interval"]
self.start_date = state["start_date"]
self.end_date = state["end_date"]
self.timezone = state["timezone"]
self.jitter = state["jitter"]
self._time = time(*state["time"], tzinfo=self.timezone)
def __repr__(self) -> str:
fields = []
for field in "years", "months", "weeks", "days":
value = getattr(self, field)
if value > 0:
fields.append(f"{field}={value}")
fields.append(f"time={self._time.isoformat()!r}")
fields.append(f"start_date='{self.start_date}'")
if self.end_date:
fields.append(f"end_date='{self.end_date}'")
fields.append(f"timezone={timezone_repr(self.timezone)!r}")
return f'{self.__class__.__name__}({", ".join(fields)})'
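# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Scheduling a job for 09:00 on the first day of every month with the trigger above.
# The scheduler type, job function, start date and timezone are assumptions for this
# example.
def _example_calendarinterval_usage():
    from apscheduler.schedulers.background import BackgroundScheduler

    def monthly_report():
        print("generating report")

    trigger = CalendarIntervalTrigger(
        months=1, hour=9, start_date=date(2025, 1, 1), timezone="Europe/Madrid"
    )
    scheduler = BackgroundScheduler()
    scheduler.add_job(monthly_report, trigger)
    scheduler.start()
    return scheduler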

View file

@ -0,0 +1,114 @@
from apscheduler.triggers.base import BaseTrigger
from apscheduler.util import obj_to_ref, ref_to_obj
class BaseCombiningTrigger(BaseTrigger):
__slots__ = ("triggers", "jitter")
def __init__(self, triggers, jitter=None):
self.triggers = triggers
self.jitter = jitter
def __getstate__(self):
return {
"version": 1,
"triggers": [
(obj_to_ref(trigger.__class__), trigger.__getstate__())
for trigger in self.triggers
],
"jitter": self.jitter,
}
def __setstate__(self, state):
if state.get("version", 1) > 1:
raise ValueError(
f"Got serialized data for version {state['version']} of "
f"{self.__class__.__name__}, but only versions up to 1 can be handled"
)
self.jitter = state["jitter"]
self.triggers = []
for clsref, state in state["triggers"]:
cls = ref_to_obj(clsref)
trigger = cls.__new__(cls)
trigger.__setstate__(state)
self.triggers.append(trigger)
def __repr__(self):
return "<{}({}{})>".format(
self.__class__.__name__,
self.triggers,
f", jitter={self.jitter}" if self.jitter else "",
)
class AndTrigger(BaseCombiningTrigger):
"""
Always returns the earliest next fire time that all the given triggers can agree on.
The trigger is considered to be finished when any of the given triggers has finished its
schedule.
Trigger alias: ``and``
.. warning:: This trigger should only be used to combine triggers that fire on
specific times of day, such as
:class:`~apscheduler.triggers.cron.CronTrigger` and
:class:`~apscheduler.triggers.calendarinterval.CalendarIntervalTrigger`.
Attempting to use it with
:class:`~apscheduler.triggers.interval.IntervalTrigger` will likely result in
the scheduler hanging as it tries to find a fire time that matches exactly
between fire times produced by all the given triggers.
:param list triggers: triggers to combine
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
"""
__slots__ = ()
def get_next_fire_time(self, previous_fire_time, now):
while True:
fire_times = [
trigger.get_next_fire_time(previous_fire_time, now)
for trigger in self.triggers
]
if None in fire_times:
return None
elif min(fire_times) == max(fire_times):
return self._apply_jitter(fire_times[0], self.jitter, now)
else:
now = max(fire_times)
def __str__(self):
return "and[{}]".format(", ".join(str(trigger) for trigger in self.triggers))
class OrTrigger(BaseCombiningTrigger):
"""
Always returns the earliest next fire time produced by any of the given triggers.
The trigger is considered finished when all the given triggers have finished their schedules.
Trigger alias: ``or``
:param list triggers: triggers to combine
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
.. note:: Triggers that depend on the previous fire time, such as the interval trigger, may
seem to behave strangely since they are always passed the previous fire time produced by
any of the given triggers.
"""
__slots__ = ()
def get_next_fire_time(self, previous_fire_time, now):
fire_times = [
trigger.get_next_fire_time(previous_fire_time, now)
for trigger in self.triggers
]
fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
if fire_times:
return self._apply_jitter(min(fire_times), self.jitter, now)
else:
return None
def __str__(self):
return "or[{}]".format(", ".join(str(trigger) for trigger in self.triggers))

View file

@ -0,0 +1,289 @@
from datetime import datetime, timedelta
from tzlocal import get_localzone
from apscheduler.triggers.base import BaseTrigger
from apscheduler.triggers.cron.fields import (
DEFAULT_VALUES,
BaseField,
DayOfMonthField,
DayOfWeekField,
MonthField,
WeekField,
)
from apscheduler.util import (
astimezone,
convert_to_datetime,
datetime_ceil,
datetime_repr,
)
class CronTrigger(BaseTrigger):
"""
Triggers when current time matches all specified time constraints,
similarly to how the UNIX cron scheduler works.
:param int|str year: 4-digit year
:param int|str month: month (1-12)
:param int|str day: day of month (1-31)
:param int|str week: ISO week (1-53)
:param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
:param int|str hour: hour (0-23)
:param int|str minute: minute (0-59)
:param int|str second: second (0-59)
:param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
:param datetime|str end_date: latest possible date/time to trigger on (inclusive)
:param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
to scheduler timezone)
:param int|None jitter: delay the job execution by ``jitter`` seconds at most
.. note:: The first weekday is always **monday**.
"""
FIELD_NAMES = (
"year",
"month",
"day",
"week",
"day_of_week",
"hour",
"minute",
"second",
)
FIELDS_MAP = {
"year": BaseField,
"month": MonthField,
"week": WeekField,
"day": DayOfMonthField,
"day_of_week": DayOfWeekField,
"hour": BaseField,
"minute": BaseField,
"second": BaseField,
}
__slots__ = "timezone", "start_date", "end_date", "fields", "jitter"
def __init__(
self,
year=None,
month=None,
day=None,
week=None,
day_of_week=None,
hour=None,
minute=None,
second=None,
start_date=None,
end_date=None,
timezone=None,
jitter=None,
):
if timezone:
self.timezone = astimezone(timezone)
elif isinstance(start_date, datetime) and start_date.tzinfo:
self.timezone = astimezone(start_date.tzinfo)
elif isinstance(end_date, datetime) and end_date.tzinfo:
self.timezone = astimezone(end_date.tzinfo)
else:
self.timezone = get_localzone()
self.start_date = convert_to_datetime(start_date, self.timezone, "start_date")
self.end_date = convert_to_datetime(end_date, self.timezone, "end_date")
self.jitter = jitter
values = dict(
(key, value)
for (key, value) in locals().items()
if key in self.FIELD_NAMES and value is not None
)
self.fields = []
assign_defaults = False
for field_name in self.FIELD_NAMES:
if field_name in values:
exprs = values.pop(field_name)
is_default = False
assign_defaults = not values
elif assign_defaults:
exprs = DEFAULT_VALUES[field_name]
is_default = True
else:
exprs = "*"
is_default = True
field_class = self.FIELDS_MAP[field_name]
field = field_class(field_name, exprs, is_default)
self.fields.append(field)
@classmethod
def from_crontab(cls, expr, timezone=None):
"""
Create a :class:`~CronTrigger` from a standard crontab expression.
See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.
:param expr: minute, hour, day of month, month, day of week
:param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (
defaults to scheduler timezone)
:return: a :class:`~CronTrigger` instance
"""
values = expr.split()
if len(values) != 5:
raise ValueError(f"Wrong number of fields; got {len(values)}, expected 5")
return cls(
minute=values[0],
hour=values[1],
day=values[2],
month=values[3],
day_of_week=values[4],
timezone=timezone,
)
def _increment_field_value(self, dateval, fieldnum):
"""
Increments the designated field and resets all less significant fields to their minimum
values.
:type dateval: datetime
:type fieldnum: int
:return: a tuple containing the new date, and the number of the field that was actually
incremented
:rtype: tuple
"""
values = {}
i = 0
while i < len(self.fields):
field = self.fields[i]
if not field.REAL:
if i == fieldnum:
fieldnum -= 1
i -= 1
else:
i += 1
continue
if i < fieldnum:
values[field.name] = field.get_value(dateval)
i += 1
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
i += 1
else:
value = field.get_value(dateval)
maxval = field.get_max(dateval)
if value == maxval:
fieldnum -= 1
i -= 1
else:
values[field.name] = value + 1
i += 1
difference = datetime(**values) - dateval.replace(tzinfo=None)
dateval = datetime.fromtimestamp(
dateval.timestamp() + difference.total_seconds(), self.timezone
)
return dateval, fieldnum
def _set_field_value(self, dateval, fieldnum, new_value):
values = {}
for i, field in enumerate(self.fields):
if field.REAL:
if i < fieldnum:
values[field.name] = field.get_value(dateval)
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
else:
values[field.name] = new_value
return datetime(**values, tzinfo=self.timezone, fold=dateval.fold)
def get_next_fire_time(self, previous_fire_time, now):
if previous_fire_time:
start_date = min(now, previous_fire_time + timedelta(microseconds=1))
if start_date == previous_fire_time:
start_date += timedelta(microseconds=1)
else:
start_date = max(now, self.start_date) if self.start_date else now
fieldnum = 0
next_date = datetime_ceil(start_date).astimezone(self.timezone)
while 0 <= fieldnum < len(self.fields):
field = self.fields[fieldnum]
curr_value = field.get_value(next_date)
next_value = field.get_next_value(next_date)
if next_value is None:
# No valid value was found
next_date, fieldnum = self._increment_field_value(
next_date, fieldnum - 1
)
elif next_value > curr_value:
# A valid value higher than the starting value was found
if field.REAL:
next_date = self._set_field_value(next_date, fieldnum, next_value)
fieldnum += 1
else:
next_date, fieldnum = self._increment_field_value(
next_date, fieldnum
)
else:
# A valid value was found, no changes necessary
fieldnum += 1
# Return if the date has rolled past the end date
if self.end_date and next_date > self.end_date:
return None
if fieldnum >= 0:
next_date = self._apply_jitter(next_date, self.jitter, now)
return min(next_date, self.end_date) if self.end_date else next_date
def __getstate__(self):
return {
"version": 2,
"timezone": self.timezone,
"start_date": self.start_date,
"end_date": self.end_date,
"fields": self.fields,
"jitter": self.jitter,
}
def __setstate__(self, state):
# This is for compatibility with APScheduler 3.0.x
if isinstance(state, tuple):
state = state[1]
if state.get("version", 1) > 2:
raise ValueError(
f"Got serialized data for version {state['version']} of "
f"{self.__class__.__name__}, but only versions up to 2 can be handled"
)
self.timezone = astimezone(state["timezone"])
self.start_date = state["start_date"]
self.end_date = state["end_date"]
self.fields = state["fields"]
self.jitter = state.get("jitter")
def __str__(self):
options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
return "cron[{}]".format(", ".join(options))
def __repr__(self):
options = [f"{f.name}='{f}'" for f in self.fields if not f.is_default]
if self.start_date:
options.append(f"start_date={datetime_repr(self.start_date)!r}")
if self.end_date:
options.append(f"end_date={datetime_repr(self.end_date)!r}")
if self.jitter:
options.append(f"jitter={self.jitter}")
return "<{} ({}, timezone='{}')>".format(
self.__class__.__name__,
", ".join(options),
self.timezone,
)
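A usage sketch for CronTrigger in both its keyword form and via from_crontab; the job function sync_feeds and the weekday 07:30 schedule are illustrative assumptions:

# Sketch only: two equivalent ways to build the same weekday schedule.
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger

def sync_feeds():
    print("syncing RSS feeds")

scheduler = BackgroundScheduler()

# Keyword form: every weekday at 07:30, delayed by at most 120 s of jitter.
scheduler.add_job(sync_feeds, CronTrigger(day_of_week="mon-fri", hour=7, minute=30, jitter=120))

# The same weekday schedule built from a standard five-field crontab
# expression (note that from_crontab takes no jitter argument).
scheduler.add_job(sync_feeds, CronTrigger.from_crontab("30 7 * * mon-fri"))

scheduler.start()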

Some files were not shown because too many files have changed in this diff