diff --git a/.gitignore b/.gitignore
index ee342e9..e3a0b9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,10 @@
*.pyc
build/
.coverage
+*.report
*.egg-info
logs/
-dist/
\ No newline at end of file
+dist/
+*.swp
+*.swo
+.tox/
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..3a1cd4e
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,14 @@
+language: python
+python:
+ - "2.6"
+ - "2.7"
+ - "3.3"
+ - "3.4"
+# - "pypy"
+# command to install dependencies
+install:
+ - python setup.py install
+# command to run tests
+script: python setup.py test
+services:
+ - redis-server
diff --git a/AUTHORS.md b/AUTHORS.md
index 68fc934..89cf71b 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -1,11 +1,24 @@
## Authors
* Matt George
-* Chris Song
-* yashh
-* dsc
-* Alex Ezell
+* Craig Hawco
* Michael Russo
-* Whit Morris
+* Chris Song
+* Whit Morriss
* Joe Shaw
+* Yashwanth Nelapati
+* Cezar Sa Espinola
+* Alex Ezell
+* Christy O'Reilly
+* Kevin McConnell
+* Bernardo Heynemann
+* David Schoonover
+* Rob Hudson
+* Salimane Adjao Moustapha
+* John Hobbs
+* James M. Henderson
+* Iraê Carvalho
+* Fabien Reboia
+* Peter Teichman
+
Inspired by Resque, by Chris Wanstrath
diff --git a/HISTORY.md b/HISTORY.md
index 40d369d..7236716 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,3 +1,27 @@
+##1.4.2 (2013-06-21)
+* __str__ returns correctly with dsn
+* worker_pids returns correct set of workers
+* workers are re-registered on every job
+* add exception metadata for after_perform method
+* logger no longer overrides root logger
+* support for redis db in dsn
+
+##1.4.1 (2012-07-30)
+* fix for non existent system signal for linux
+* cleanup of setup.py and requirements
+
+##1.4 (2012-06-?)
+* added hooks for before and after perform methods
+* fixed logging
+* fixed problems with password authentication
+
+##1.3 (2012-06-01)
+* remove resweb from pyres
+* resweb is now available at http://github.com/Pyres/resweb or on pypi
+
+##1.2
+* release with changes from pull requests
+
##1.1 (2011-06-16)
* api change based on redis-py
* setproctitle requirements fix
diff --git a/LICENSE b/LICENSE
index 22c6364..f9725b0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 Matt George
+Copyright (c) 2009-2013 Matt George
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/MANIFEST.in b/MANIFEST.in
index 326432f..f9bd145 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1 @@
-recursive-include resweb *.mustache media/*
+include requirements.txt
diff --git a/README.markdown b/README.markdown
index 0f07681..ef8d07c 100644
--- a/README.markdown
+++ b/README.markdown
@@ -8,6 +8,10 @@ Pyres - a Resque clone
Because of some differences between ruby and python, there are a couple of places where I chose speed over correctness. The goal will be to eventually take the application and make it more pythonic without sacrificing the awesome functionality found in resque. At the same time, I hope to stay within the bounds of the original api and web interface.
+## Travis CI
+
+Currently, pyres is being tested via Travis CI for Python versions 2.6, 2.7, 3.3, and 3.4:
+[![Build Status](https://secure.travis-ci.org/binarydud/pyres.png)](http://travis-ci.org/binarydud/pyres)
## Running Tests
diff --git a/coverage.report b/coverage.report
deleted file mode 100644
index d9c33d7..0000000
--- a/coverage.report
+++ /dev/null
@@ -1,11 +0,0 @@
-Name Stmts Exec Cover Missing
-------------------------------------------------
-pyres 138 131 94% 26, 39, 98, 133-134, 144-145
-pyres.exceptions 2 2 100%
-pyres.failure 23 22 95% 41
-pyres.job 23 23 100%
-pyres.worker 189 146 77% 66, 74, 84-112, 161, 179, 186, 230-241
-------------------------------------------------
-TOTAL 375 324 86%
-----------------------------------------------------------------------
-Ran 32 tests in 0.884s
\ No newline at end of file
diff --git a/docs/source/_theme/flask/layout.html b/docs/source/_theme/flask/layout.html
new file mode 100644
index 0000000..5caa4e2
--- /dev/null
+++ b/docs/source/_theme/flask/layout.html
@@ -0,0 +1,25 @@
+{%- extends "basic/layout.html" %}
+{%- block extrahead %}
+ {{ super() }}
+ {% if theme_touch_icon %}
+
+ {% endif %}
+
+{% endblock %}
+{%- block relbar2 %}{% endblock %}
+{% block header %}
+ {{ super() }}
+ {% if pagename == 'index' %}
+
+ {% endif %}
+{% endblock %}
+{%- block footer %}
+
+ {% if pagename == 'index' %}
+
+ {% endif %}
+{%- endblock %}
diff --git a/docs/source/_theme/flask/relations.html b/docs/source/_theme/flask/relations.html
new file mode 100644
index 0000000..3bbcde8
--- /dev/null
+++ b/docs/source/_theme/flask/relations.html
@@ -0,0 +1,19 @@
+Related Topics
+
diff --git a/docs/source/_theme/flask/static/flasky.css_t b/docs/source/_theme/flask/static/flasky.css_t
new file mode 100644
index 0000000..b5ca39b
--- /dev/null
+++ b/docs/source/_theme/flask/static/flasky.css_t
@@ -0,0 +1,395 @@
+/*
+ * flasky.css_t
+ * ~~~~~~~~~~~~
+ *
+ * :copyright: Copyright 2010 by Armin Ronacher.
+ * :license: Flask Design License, see LICENSE for details.
+ */
+
+{% set page_width = '940px' %}
+{% set sidebar_width = '220px' %}
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Georgia', serif;
+ font-size: 17px;
+ background-color: white;
+ color: #000;
+ margin: 0;
+ padding: 0;
+}
+
+div.document {
+ width: {{ page_width }};
+ margin: 30px auto 0 auto;
+}
+
+div.documentwrapper {
+ float: left;
+ width: 100%;
+}
+
+div.bodywrapper {
+ margin: 0 0 0 {{ sidebar_width }};
+}
+
+div.sphinxsidebar {
+ width: {{ sidebar_width }};
+}
+
+hr {
+ border: 1px solid #B1B4B6;
+}
+
+div.body {
+ background-color: #ffffff;
+ color: #3E4349;
+ padding: 0 30px 0 30px;
+}
+
+img.floatingflask {
+ padding: 0 0 10px 10px;
+ float: right;
+}
+
+div.footer {
+ width: {{ page_width }};
+ margin: 20px auto 30px auto;
+ font-size: 14px;
+ color: #888;
+ text-align: right;
+}
+
+div.footer a {
+ color: #888;
+}
+
+div.related {
+ display: none;
+}
+
+div.sphinxsidebar a {
+ color: #444;
+ text-decoration: none;
+ border-bottom: 1px dotted #999;
+}
+
+div.sphinxsidebar a:hover {
+ border-bottom: 1px solid #999;
+}
+
+div.sphinxsidebar {
+ font-size: 14px;
+ line-height: 1.5;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 18px 10px;
+}
+
+div.sphinxsidebarwrapper p.logo {
+ padding: 0 0 20px 0;
+ margin: 0;
+ text-align: center;
+}
+
+div.sphinxsidebar h3,
+div.sphinxsidebar h4 {
+ font-family: 'Garamond', 'Georgia', serif;
+ color: #444;
+ font-size: 24px;
+ font-weight: normal;
+ margin: 0 0 5px 0;
+ padding: 0;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 20px;
+}
+
+div.sphinxsidebar h3 a {
+ color: #444;
+}
+
+div.sphinxsidebar p.logo a,
+div.sphinxsidebar h3 a,
+div.sphinxsidebar p.logo a:hover,
+div.sphinxsidebar h3 a:hover {
+ border: none;
+}
+
+div.sphinxsidebar p {
+ color: #555;
+ margin: 10px 0;
+}
+
+div.sphinxsidebar ul {
+ margin: 10px 0;
+ padding: 0;
+ color: #000;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #ccc;
+ font-family: 'Georgia', serif;
+ font-size: 1em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+a {
+ color: #004B6B;
+ text-decoration: underline;
+}
+
+a:hover {
+ color: #6D4100;
+ text-decoration: underline;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+ font-family: 'Garamond', 'Georgia', serif;
+ font-weight: normal;
+ margin: 30px 0px 10px 0px;
+ padding: 0;
+}
+
+{% if theme_index_logo %}
+div.indexwrapper h1 {
+ text-indent: -999999px;
+ background: url({{ theme_index_logo }}) no-repeat center center;
+ height: {{ theme_index_logo_height }};
+}
+{% endif %}
+
+div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
+div.body h2 { font-size: 180%; }
+div.body h3 { font-size: 150%; }
+div.body h4 { font-size: 130%; }
+div.body h5 { font-size: 100%; }
+div.body h6 { font-size: 100%; }
+
+a.headerlink {
+ color: #ddd;
+ padding: 0 4px;
+ text-decoration: none;
+}
+
+a.headerlink:hover {
+ color: #444;
+ background: #eaeaea;
+}
+
+div.body p, div.body dd, div.body li {
+ line-height: 1.4em;
+}
+
+div.admonition {
+ background: #fafafa;
+ margin: 20px -30px;
+ padding: 10px 30px;
+ border-top: 1px solid #ccc;
+ border-bottom: 1px solid #ccc;
+}
+
+div.admonition tt.xref, div.admonition a tt {
+ border-bottom: 1px solid #fafafa;
+}
+
+dd div.admonition {
+ margin-left: -60px;
+ padding-left: 60px;
+}
+
+div.admonition p.admonition-title {
+ font-family: 'Garamond', 'Georgia', serif;
+ font-weight: normal;
+ font-size: 24px;
+ margin: 0 0 10px 0;
+ padding: 0;
+ line-height: 1;
+}
+
+div.admonition p.last {
+ margin-bottom: 0;
+}
+
+div.highlight {
+ background-color: white;
+}
+
+dt:target, .highlight {
+ background: #FAF3E8;
+}
+
+div.note {
+ background-color: #eee;
+ border: 1px solid #ccc;
+}
+
+div.seealso {
+ background-color: #ffc;
+ border: 1px solid #ff6;
+}
+
+div.topic {
+ background-color: #eee;
+}
+
+p.admonition-title {
+ display: inline;
+}
+
+p.admonition-title:after {
+ content: ":";
+}
+
+pre, tt {
+ font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.9em;
+}
+
+img.screenshot {
+}
+
+tt.descname, tt.descclassname {
+ font-size: 0.95em;
+}
+
+tt.descname {
+ padding-right: 0.08em;
+}
+
+img.screenshot {
+ -moz-box-shadow: 2px 2px 4px #eee;
+ -webkit-box-shadow: 2px 2px 4px #eee;
+ box-shadow: 2px 2px 4px #eee;
+}
+
+table.docutils {
+ border: 1px solid #888;
+ -moz-box-shadow: 2px 2px 4px #eee;
+ -webkit-box-shadow: 2px 2px 4px #eee;
+ box-shadow: 2px 2px 4px #eee;
+}
+
+table.docutils td, table.docutils th {
+ border: 1px solid #888;
+ padding: 0.25em 0.7em;
+}
+
+table.field-list, table.footnote {
+ border: none;
+ -moz-box-shadow: none;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+
+table.footnote {
+ margin: 15px 0;
+ width: 100%;
+ border: 1px solid #eee;
+ background: #fdfdfd;
+ font-size: 0.9em;
+}
+
+table.footnote + table.footnote {
+ margin-top: -15px;
+ border-top: none;
+}
+
+table.field-list th {
+ padding: 0 0.8em 0 0;
+}
+
+table.field-list td {
+ padding: 0;
+}
+
+table.footnote td.label {
+ width: 0px;
+ padding: 0.3em 0 0.3em 0.5em;
+}
+
+table.footnote td {
+ padding: 0.3em 0.5em;
+}
+
+dl {
+ margin: 0;
+ padding: 0;
+}
+
+dl dd {
+ margin-left: 30px;
+}
+
+blockquote {
+ margin: 0 0 0 30px;
+ padding: 0;
+}
+
+ul, ol {
+ margin: 10px 0 10px 30px;
+ padding: 0;
+}
+
+pre {
+ background: #eee;
+ padding: 7px 30px;
+ margin: 15px -30px;
+ line-height: 1.3em;
+}
+
+dl pre, blockquote pre, li pre {
+ margin-left: -60px;
+ padding-left: 60px;
+}
+
+dl dl pre {
+ margin-left: -90px;
+ padding-left: 90px;
+}
+
+tt {
+ background-color: #ecf0f3;
+ color: #222;
+ /* padding: 1px 2px; */
+}
+
+tt.xref, a tt {
+ background-color: #FBFBFB;
+ border-bottom: 1px solid white;
+}
+
+a.reference {
+ text-decoration: none;
+ border-bottom: 1px dotted #004B6B;
+}
+
+a.reference:hover {
+ border-bottom: 1px solid #6D4100;
+}
+
+a.footnote-reference {
+ text-decoration: none;
+ font-size: 0.7em;
+ vertical-align: top;
+ border-bottom: 1px dotted #004B6B;
+}
+
+a.footnote-reference:hover {
+ border-bottom: 1px solid #6D4100;
+}
+
+a:hover tt {
+ background: #EEE;
+}
diff --git a/docs/source/_theme/flask/static/small_flask.css b/docs/source/_theme/flask/static/small_flask.css
new file mode 100644
index 0000000..1c6df30
--- /dev/null
+++ b/docs/source/_theme/flask/static/small_flask.css
@@ -0,0 +1,70 @@
+/*
+ * small_flask.css_t
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * :copyright: Copyright 2010 by Armin Ronacher.
+ * :license: Flask Design License, see LICENSE for details.
+ */
+
+body {
+ margin: 0;
+ padding: 20px 30px;
+}
+
+div.documentwrapper {
+ float: none;
+ background: white;
+}
+
+div.sphinxsidebar {
+ display: block;
+ float: none;
+ width: 102.5%;
+ margin: 50px -30px -20px -30px;
+ padding: 10px 20px;
+ background: #333;
+ color: white;
+}
+
+div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
+div.sphinxsidebar h3 a {
+ color: white;
+}
+
+div.sphinxsidebar a {
+ color: #aaa;
+}
+
+div.sphinxsidebar p.logo {
+ display: none;
+}
+
+div.document {
+ width: 100%;
+ margin: 0;
+}
+
+div.related {
+ display: block;
+ margin: 0;
+ padding: 10px 0 20px 0;
+}
+
+div.related ul,
+div.related ul li {
+ margin: 0;
+ padding: 0;
+}
+
+div.footer {
+ display: none;
+}
+
+div.bodywrapper {
+ margin: 0;
+}
+
+div.body {
+ min-height: 0;
+ padding: 0;
+}
diff --git a/docs/source/_theme/flask/theme.conf b/docs/source/_theme/flask/theme.conf
new file mode 100644
index 0000000..18c720f
--- /dev/null
+++ b/docs/source/_theme/flask/theme.conf
@@ -0,0 +1,9 @@
+[theme]
+inherit = basic
+stylesheet = flasky.css
+pygments_style = flask_theme_support.FlaskyStyle
+
+[options]
+index_logo = ''
+index_logo_height = 120px
+touch_icon =
diff --git a/docs/source/class.rst b/docs/source/class.rst
index 7782ae8..6121909 100644
--- a/docs/source/class.rst
+++ b/docs/source/class.rst
@@ -10,7 +10,7 @@ Job Classes
=================
.. autoclass:: pyres.job.Job
- :members:
+ :members:
Worker Classes
=================
@@ -25,4 +25,4 @@ Failure Classes
:members:
.. autoclass:: pyres.failure.RedisBackend
- :members:
\ No newline at end of file
+ :members:
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 6962aa8..545ed07 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -12,7 +12,6 @@
# serve to show the default.
import sys, os
-from pyres import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -39,16 +38,16 @@
# General information about the project.
project = u'pyres'
-copyright = u'2010, Matt George'
+copyright = u'2012, Matt George'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = __version__
+version = '1.3'
# The full version, including alpha/beta/rc tags.
-release = __version__
+release = '1.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -92,7 +91,9 @@
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
+sys.path.append(os.path.abspath('_theme'))
+html_theme_path = ['_theme']
+html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
@@ -194,5 +195,3 @@
# If false, no module index is generated.
#latex_use_modindex = True
-html_theme = "nature"
-html_theme_path = ["_theme"]
\ No newline at end of file
diff --git a/docs/source/example.rst b/docs/source/example.rst
index 4c67e23..33d8c93 100644
--- a/docs/source/example.rst
+++ b/docs/source/example.rst
@@ -1,7 +1,7 @@
Example
=========
-Let's take a real wold example of a blog where comments need to be checked for
+Let's take a real world example of a blog where comments need to be checked for
spam. When the comment is saved in the database, we create a job in the
queue with that comment data. Let's take a django model in this case.
@@ -33,7 +33,9 @@ You can convert your existing class to be compatible with pyres. All you need
to do is add a :attr:`queue` attribute and define a :meth:`perform` method
on the class.
-To insert a job into the queue you need to do something like this::
+To insert a job into the queue you need to do something like this:
+
+.. code-block:: python
>>> from pyres import ResQ
>>> r = ResQ()
@@ -47,4 +49,3 @@ In the **scripts** folder there is an executable::
Just pass a comma separated list of queues the worker should poll.
-
diff --git a/pyres/__init__.py b/pyres/__init__.py
index fbf5e09..011cd88 100644
--- a/pyres/__init__.py
+++ b/pyres/__init__.py
@@ -1,6 +1,7 @@
-__version__ = '1.1'
+__version__ = '1.5'
from redis import Redis
+from pyres.compat import string_types
import pyres.json_parser as json
import os
@@ -8,6 +9,8 @@
import sys
import logging
+logger = logging.getLogger(__name__)
+
def special_log_file(filename):
if filename in ("stderr", "stdout"):
return True
@@ -20,7 +23,7 @@ def get_logging_handler(filename, procname, namespace=None):
message_format = namespace + ': %(message)s'
else:
message_format = '%(message)s'
- format = '%(asctime)s %(levelname)-8s ' + message_format
+ format = '%(asctime)s %(process)5d %(levelname)-8s ' + message_format
if not filename:
filename = "stderr"
@@ -56,7 +59,8 @@ def get_logging_handler(filename, procname, namespace=None):
def setup_logging(procname, log_level=logging.INFO, filename=None):
if log_level == logging.NOTSET:
return
- logger = logging.getLogger()
+ main_package = __name__.split('.', 1)[0] if '.' in __name__ else __name__
+ logger = logging.getLogger(main_package)
logger.setLevel(log_level)
handler = get_logging_handler(filename, procname)
logger.addHandler(handler)
@@ -86,6 +90,15 @@ def safe_str_to_class(s):
klass = lst[-1]
mod_list = lst[:-1]
module = ".".join(mod_list)
+
+ # ruby compatibility kludge: resque sends just a class name and
+ # not a module name so if I use resque to queue a ruby class
+ # called "Worker" then pyres will throw a "ValueError: Empty
+ # module name" exception. To avoid that, if there's no module in
+ # the json then we'll use the classname as a module name.
+ if not module:
+ module = klass
+
mod = my_import(module)
if hasattr(mod, klass):
return getattr(mod, klass)
@@ -113,18 +126,10 @@ class ResQ(object):
The ``__init__`` takes these keyword arguments:
- ``server`` -- IP address and port of the Redis server to which you want to connect. Default is `localhost:6379`.
+ ``server`` -- IP address and port of the Redis server to which you want to connect, and optional Redis DB number. Default is `localhost:6379`.
``password`` -- The password, if required, of your Redis server. Default is "None".
- ``timeout`` -- The timeout keyword is in the signature, but is unused. Default is "None".
-
- ``retry_connection`` -- This keyword is in the signature but is deprecated. Default is "True".
-
-
- Both ``timeout`` and ``retry_connection`` will be removed as the python-redis client
- no longer uses them.
-
Example usage::
>>> from pyres import *
@@ -140,9 +145,8 @@ class ResQ(object):
"""
def __init__(self, server="localhost:6379", password=None):
+ self.password = password
self.redis = server
- if password:
- self.redis.auth(password)
self._watched_queues = set()
def push(self, queue, item):
@@ -150,13 +154,13 @@ def push(self, queue, item):
self.redis.rpush("resque:queue:%s" % queue, ResQ.encode(item))
def pop(self, queues, timeout=10):
- if isinstance(queues, basestring):
+ if isinstance(queues, string_types):
queues = [queues]
ret = self.redis.blpop(["resque:queue:%s" % q for q in queues],
timeout=timeout)
if ret:
key, ret = ret
- return key[13:], ResQ.decode(ret) # trim "resque:queue:"
+ return key[13:].decode(), ResQ.decode(ret) # trim "resque:queue:"
else:
return None, None
@@ -184,10 +188,11 @@ def _get_redis(self):
return self._redis
def _set_redis(self, server):
- if isinstance(server, basestring):
+ if isinstance(server, string_types):
self.dsn = server
- host, port = server.split(':')
- self._redis = Redis(host=host, port=int(port))
+ address, _, db = server.partition('/')
+ host, port = address.split(':')
+ self._redis = Redis(host=host, port=int(port), db=int(db or 0), password=self.password)
self.host = host
self.port = int(port)
elif isinstance(server, Redis):
@@ -212,28 +217,26 @@ def enqueue(self, klass, *args):
queue = getattr(klass,'queue', None)
if queue:
class_name = '%s.%s' % (klass.__module__, klass.__name__)
- self.push(queue, {'class':class_name,'args':args})
- logging.info("enqueued '%s' job on queue %s" % (class_name, queue))
- if args:
- logging.debug("job arguments: %s" % str(args))
- else:
- logging.debug("no arguments passed in.")
+ self.enqueue_from_string(class_name, queue, *args)
else:
- logging.warning("unable to enqueue job with class %s" % str(klass))
+ logger.warning("unable to enqueue job with class %s" % str(klass))
def enqueue_from_string(self, klass_as_string, queue, *args, **kwargs):
- payload = {'class':klass_as_string, 'queue': queue, 'args':args}
+ payload = {'class':klass_as_string, 'args':args, 'enqueue_timestamp': time.time()}
if 'first_attempt' in kwargs:
payload['first_attempt'] = kwargs['first_attempt']
self.push(queue, payload)
- logging.info("enqueued '%s' job on queue %s" % (klass_as_string, queue))
+ logger.info("enqueued '%s' job on queue %s" % (klass_as_string, queue))
if args:
- logging.debug("job arguments: %s" % str(args))
+ logger.debug("job arguments: %s" % str(args))
else:
- logging.debug("no arguments passed in.")
+ logger.debug("no arguments passed in.")
def queues(self):
- return self.redis.smembers("resque:queues") or []
+ return [sm.decode() for sm in self.redis.smembers("resque:queues")] or []
+
+ def workers(self):
+ return [w.decode() for w in self.redis.smembers("resque:workers")] or []
def info(self):
"""Returns a dictionary of the current status of the pending jobs,
@@ -254,7 +257,7 @@ def info(self):
}
def keys(self):
- return [key.replace('resque:','')
+ return [key.decode().replace('resque:','')
for key in self.redis.keys('resque:*')]
def reserve(self, queues):
@@ -262,11 +265,7 @@ def reserve(self, queues):
return Job.reserve(queues, self)
def __str__(self):
- return "PyRes Client connected to %s" % self.redis.server
-
- def workers(self):
- from pyres.worker import Worker
- return Worker.all(self)
+ return "PyRes Client connected to %s" % self.dsn
def working(self):
from pyres.worker import Worker
@@ -286,11 +285,14 @@ def close(self):
def enqueue_at(self, datetime, klass, *args, **kwargs):
class_name = '%s.%s' % (klass.__module__, klass.__name__)
- logging.info("scheduled '%s' job on queue %s for execution at %s" %
- (class_name, klass.queue, datetime))
+ self.enqueue_at_from_string(datetime, class_name, klass.queue, *args, **kwargs)
+
+ def enqueue_at_from_string(self, datetime, klass_as_string, queue, *args, **kwargs):
+ logger.info("scheduled '%s' job on queue %s for execution at %s" %
+ (klass_as_string, queue, datetime))
if args:
- logging.debug("job arguments are: %s" % str(args))
- payload = {'class':class_name, 'queue': klass.queue, 'args':args}
+ logger.debug("job arguments are: %s" % str(args))
+ payload = {'class': klass_as_string, 'queue': queue, 'args': args}
if 'first_attempt' in kwargs:
payload['first_attempt'] = kwargs['first_attempt']
self.delayed_push(datetime, payload)
@@ -311,7 +313,7 @@ def delayed_queue_schedule_size(self):
size = 0
length = self.redis.zcard('resque:delayed_queue_schedule')
for i in self.redis.zrange('resque:delayed_queue_schedule',0,length):
- size += self.delayed_timestamp_size(i)
+ size += self.delayed_timestamp_size(i.decode())
return size
def delayed_timestamp_size(self, timestamp):
@@ -321,11 +323,13 @@ def delayed_timestamp_size(self, timestamp):
def next_delayed_timestamp(self):
key = int(time.mktime(ResQ._current_time().timetuple()))
array = self.redis.zrangebyscore('resque:delayed_queue_schedule',
- '-inf', key)
+ '-inf', key, start=0, num=1)
timestamp = None
if array:
timestamp = array[0]
- return timestamp
+
+ if timestamp:
+ return timestamp.decode()
def next_item_for_timestamp(self, timestamp):
#key = int(time.mktime(timestamp.timetuple()))
@@ -345,10 +349,10 @@ def encode(cls, item):
@classmethod
def decode(cls, item):
- if isinstance(item, basestring):
- ret = json.loads(item)
- return ret
- return None
+ if not isinstance(item, string_types):
+ item = item.decode()
+ ret = json.loads(item)
+ return ret
@classmethod
def _enqueue(cls, klass, *args):
@@ -356,7 +360,8 @@ def _enqueue(cls, klass, *args):
_self = cls()
if queue:
class_name = '%s.%s' % (klass.__module__, klass.__name__)
- _self.push(queue, {'class':class_name,'args':args})
+ _self.push(queue, {'class':class_name,'args':args,
+ 'enqueue_timestamp': time.time()})
@staticmethod
def _current_time():
diff --git a/pyres/compat.py b/pyres/compat.py
new file mode 100644
index 0000000..c39fd3f
--- /dev/null
+++ b/pyres/compat.py
@@ -0,0 +1,30 @@
+import sys
+import types
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: no cover
+ import pickle
+
+# True if we are running on Python 3.
+PY3 = sys.version_info[0] == 3
+
+if PY3: # pragma: no cover
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+ long = int
+ import subprocess as commands
+
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+ long = long
+ import commands
+
+
diff --git a/pyres/exceptions.py b/pyres/exceptions.py
index 6269e81..1ced01d 100644
--- a/pyres/exceptions.py
+++ b/pyres/exceptions.py
@@ -1,2 +1,11 @@
class NoQueueError(Exception):
pass
+
+class JobError(RuntimeError):
+ pass
+
+class TimeoutError(JobError):
+ pass
+
+class CrashError(JobError):
+ pass
\ No newline at end of file
diff --git a/pyres/failure/base.py b/pyres/failure/base.py
index c902299..330fbe4 100644
--- a/pyres/failure/base.py
+++ b/pyres/failure/base.py
@@ -17,10 +17,14 @@ class BaseBackend(object):
"""
def __init__(self, exp, queue, payload, worker=None):
- excc, _, tb = sys.exc_info()
+ excc = sys.exc_info()[0]
self._exception = excc
- self._traceback = traceback.format_exc()
+ try:
+ self._traceback = traceback.format_exc()
+ except AttributeError:
+ self._traceback = None
+
self._worker = worker
self._queue = queue
self._payload = payload
diff --git a/pyres/failure/multiple.py b/pyres/failure/multiple.py
index e4d05f7..6362363 100644
--- a/pyres/failure/multiple.py
+++ b/pyres/failure/multiple.py
@@ -1,5 +1,5 @@
-from base import BaseBackend
-from redis import RedisBackend
+from pyres.failure.base import BaseBackend
+from pyres.failure.redis import RedisBackend
class MultipleBackend(BaseBackend):
"""Extends ``BaseBackend`` to provide support for delegating calls to multiple
diff --git a/pyres/failure/redis.py b/pyres/failure/redis.py
index 715f9de..5fe71ee 100644
--- a/pyres/failure/redis.py
+++ b/pyres/failure/redis.py
@@ -1,7 +1,7 @@
import datetime, time
from base64 import b64encode
-from base import BaseBackend
+from .base import BaseBackend
from pyres import ResQ
class RedisBackend(BaseBackend):
diff --git a/pyres/horde.py b/pyres/horde.py
index 03be633..b41b7a4 100644
--- a/pyres/horde.py
+++ b/pyres/horde.py
@@ -10,8 +10,12 @@
import logging.handlers
from pyres import ResQ, Stat, get_logging_handler, special_log_file
from pyres.exceptions import NoQueueError
-from pyres.utils import OrderedDict
+try:
+ from collections import OrderedDict
+except ImportError:
+ from ordereddict import OrderedDict
from pyres.job import Job
+from pyres.compat import string_types
import pyres.json_parser as json
try:
from setproctitle import setproctitle
@@ -20,7 +24,7 @@ def setproctitle(name):
pass
def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None):
-
+
logger = multiprocessing.get_logger()
#logger = multiprocessing.log_to_stderr()
logger.setLevel(log_level)
@@ -29,63 +33,67 @@ def setup_logging(procname, namespace='', log_level=logging.INFO, log_file=None)
return logger
class Minion(multiprocessing.Process):
- def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None):
+ def __init__(self, queues, server, password, log_level=logging.INFO, log_path=None, interval=5, concat_logs=False,
+ max_jobs=0):
multiprocessing.Process.__init__(self, name='Minion')
-
+
#format = '%(asctime)s %(levelname)s %(filename)s-%(lineno)d: %(message)s'
#logHandler = logging.StreamHandler()
#logHandler.setFormatter(logging.Formatter(format))
#self.logger = multiprocessing.get_logger()
#self.logger.addHandler(logHandler)
#self.logger.setLevel(logging.DEBUG)
-
+
self.queues = queues
self._shutdown = False
self.hostname = os.uname()[1]
self.server = server
self.password = password
-
+ self.interval = interval
+
self.log_level = log_level
self.log_path = log_path
self.log_file = None
-
+ self.concat_logs = concat_logs
+ self.max_jobs = max_jobs
+
def prune_dead_workers(self):
pass
-
+
def schedule_shutdown(self, signum, frame):
self._shutdown = True
-
+
def register_signal_handlers(self):
signal.signal(signal.SIGTERM, self.schedule_shutdown)
signal.signal(signal.SIGINT, self.schedule_shutdown)
signal.signal(signal.SIGQUIT, self.schedule_shutdown)
-
+
def register_minion(self):
self.resq.redis.sadd('resque:minions',str(self))
self.started = datetime.datetime.now()
-
+
def startup(self):
self.register_signal_handlers()
self.prune_dead_workers()
self.register_minion()
-
+
def __str__(self):
return '%s:%s:%s' % (self.hostname, self.pid, ','.join(self.queues))
-
+
def reserve(self):
self.logger.debug('checking queues: %s' % self.queues)
job = Job.reserve(self.queues, self.resq, self.__str__())
if job:
self.logger.info('Found job on %s' % job._queue)
return job
-
+
def process(self, job):
if not job:
return
try:
self.working_on(job)
job.perform()
- except Exception, e:
+ except Exception as e:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
self.logger.error("%s failed: %s" % (job, e))
job.fail(exceptionTraceback)
@@ -95,7 +103,7 @@ def process(self, job):
self.logger.info('completed job: %s' % job)
finally:
self.done_working()
-
+
def working_on(self, job):
setproctitle('pyres_minion:%s: working on job: %s' % (os.getppid(), job._payload))
self.logger.debug('marking as working on')
@@ -108,66 +116,78 @@ def working_on(self, job):
self.resq.redis["resque:minion:%s" % str(self)] = data
self.logger.debug("minion:%s" % str(self))
#self.logger.debug(self.resq.redis["resque:minion:%s" % str(self)])
-
+
def failed(self):
Stat("failed", self.resq).incr()
-
+
def processed(self):
total_processed = Stat("processed", self.resq)
total_processed.incr()
-
+
def done_working(self):
self.logger.debug('done working')
self.processed()
self.resq.redis.delete("resque:minion:%s" % str(self))
-
+
def unregister_minion(self):
self.resq.redis.srem('resque:minions',str(self))
self.started = None
-
+
def work(self, interval=5):
-
+
self.startup()
+ cur_job = 0
while True:
setproctitle('pyres_minion:%s: waiting for job on: %s' % (os.getppid(),self.queues))
self.logger.info('waiting on job')
if self._shutdown:
self.logger.info('shutdown scheduled')
break
+ self.logger.debug('max_jobs: %d cur_jobs: %d' % (self.max_jobs, cur_job))
+ if (self.max_jobs > 0 and self.max_jobs < cur_job):
+ self.logger.debug('max_jobs reached on %s: %d' % (self.pid, cur_job))
+ self.logger.debug('minion sleeping for: %d secs' % interval)
+ time.sleep(interval)
+ cur_job = 0
job = self.reserve()
if job:
self.process(job)
+ cur_job = cur_job + 1
else:
+ cur_job = 0
+ self.logger.debug('minion sleeping for: %d secs' % interval)
time.sleep(interval)
self.unregister_minion()
-
+
def clear_logger(self):
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
-
+
def run(self):
setproctitle('pyres_minion:%s: Starting' % (os.getppid(),))
if self.log_path:
if special_log_file(self.log_path):
self.log_file = self.log_path
+ elif self.concat_logs:
+ self.log_file = os.path.join(self.log_path, 'minion.log')
else:
self.log_file = os.path.join(self.log_path, 'minion-%s.log' % self.pid)
namespace = 'minion:%s' % self.pid
self.logger = setup_logging('minion', namespace, self.log_level, self.log_file)
#self.clear_logger()
- if isinstance(self.server,basestring):
+ if isinstance(self.server,string_types):
self.resq = ResQ(server=self.server, password=self.password)
elif isinstance(self.server, ResQ):
self.resq = self.server
else:
raise Exception("Bad server argument")
-
-
- self.work()
+
+
+ self.work(self.interval)
#while True:
# job = self.q.get()
# print 'pid: %s is running %s ' % (self.pid,job)
-
+
class Khan(object):
_command_map = {
@@ -175,7 +195,8 @@ class Khan(object):
'REMOVE': '_remove_minion',
'SHUTDOWN': '_schedule_shutdown'
}
- def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO, log_file=None):
+ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=None, logging_level=logging.INFO,
+ log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0):
#super(Khan,self).__init__(queues=queues,server=server,password=password)
self._shutdown = False
self.pool_size = int(pool_size)
@@ -189,35 +210,39 @@ def __init__(self, pool_size=5, queues=[], server='localhost:6379', password=Non
self.password = password
self.logging_level = logging_level
self.log_file = log_file
-
+ self.minions_interval = minions_interval
+ self.concat_minions_logs = concat_minions_logs
+ self.max_jobs = max_jobs
+
#self._workers = list()
-
+
def setup_resq(self):
if hasattr(self,'logger'):
self.logger.info('Connecting to redis server - %s' % self.server)
- if isinstance(self.server,basestring):
+ if isinstance(self.server,string_types):
self.resq = ResQ(server=self.server, password=self.password)
elif isinstance(self.server, ResQ):
self.resq = self.server
else:
raise Exception("Bad server argument")
-
+
def validate_queues(self):
"Checks if a worker is given atleast one queue to work on."
if not self.queues:
raise NoQueueError("Please give each worker at least one queue.")
-
+
def startup(self):
self.register_signal_handlers()
-
-
+
+
def register_signal_handlers(self):
signal.signal(signal.SIGTERM, self.schedule_shutdown)
signal.signal(signal.SIGINT, self.schedule_shutdown)
signal.signal(signal.SIGQUIT, self.schedule_shutdown)
signal.signal(signal.SIGUSR1, self.kill_child)
signal.signal(signal.SIGUSR2, self.add_child)
- signal.signal(signal.SIGINFO, self.current_state)
+ if hasattr(signal, 'SIGINFO'):
+ signal.signal(signal.SIGINFO, self.current_state)
def current_state(self):
tmap = {}
@@ -251,23 +276,23 @@ def current_state(self):
def _schedule_shutdown(self):
self.schedule_shutdown(None, None)
-
+
def schedule_shutdown(self, signum, frame):
self.logger.info('Khan Shutdown scheduled')
self._shutdown = True
-
+
def kill_child(self, signum, frame):
self._remove_minion()
-
+
def add_child(self, signum, frame):
self.add_minion()
-
+
def register_khan(self):
if not hasattr(self, 'resq'):
self.setup_resq()
self.resq.redis.sadd('resque:khans',str(self))
self.started = datetime.datetime.now()
-
+
def _check_commands(self):
if not self._shutdown:
self.logger.debug('Checking commands')
@@ -276,7 +301,7 @@ def _check_commands(self):
if command:
self.process_command(command)
self._check_commands()
-
+
def process_command(self, command):
self.logger.info('Processing Command')
#available commands, shutdown, add 1, remove 1
@@ -285,13 +310,13 @@ def process_command(self, command):
fn = getattr(self, command_item)
if fn:
fn()
-
+
def add_minion(self):
self._add_minion()
self.resq.redis.srem('resque:khans',str(self))
self.pool_size += 1
self.resq.redis.sadd('resque:khans',str(self))
-
+
def _add_minion(self):
if hasattr(self,'logger'):
self.logger.info('Adding minion')
@@ -302,13 +327,15 @@ def _add_minion(self):
log_path = os.path.dirname(self.log_file)
else:
log_path = None
- m = Minion(self.queues, self.server, self.password, log_level=self.logging_level, log_path=log_path)
+ m = Minion(self.queues, self.server, self.password, interval=self.minions_interval,
+ log_level=self.logging_level, log_path=log_path, concat_logs=self.concat_minions_logs,
+ max_jobs=self.max_jobs)
m.start()
self._workers[m.pid] = m
if hasattr(self,'logger'):
self.logger.info('minion added at: %s' % m.pid)
return m
-
+
def _shutdown_minions(self):
"""
send the SIGNINT signal to each worker in the pool.
@@ -317,7 +344,7 @@ def _shutdown_minions(self):
for minion in self._workers.values():
minion.terminate()
minion.join()
-
+
def _remove_minion(self, pid=None):
#if pid:
# m = self._workers.pop(pid)
@@ -327,20 +354,20 @@ def _remove_minion(self, pid=None):
self.pool_size -= 1
self.resq.redis.sadd('resque:khans',str(self))
return m
-
+
def unregister_khan(self):
if hasattr(self,'logger'):
self.logger.debug('unregistering khan')
self.resq.redis.srem('resque:khans',str(self))
self.started = None
-
+
def setup_minions(self):
for i in range(self.pool_size):
self._add_minion()
def _setup_logging(self):
self.logger = setup_logging('khan', 'khan', self.logging_level, self.log_file)
-
+
def work(self, interval=2):
setproctitle('pyres_manager: Starting')
self.startup()
@@ -360,17 +387,21 @@ def work(self, interval=2):
break
#get job
else:
+ self.logger.debug('manager sleeping for: %d secs' % interval)
time.sleep(interval)
self.unregister_khan()
-
+
def __str__(self):
hostname = os.uname()[1]
return '%s:%s:%s' % (hostname, self.pid, self.pool_size)
-
+
@classmethod
- def run(cls, pool_size=5, queues=[], server='localhost:6379', logging_level=logging.INFO, log_file=None):
- worker = cls(pool_size=pool_size, queues=queues, server=server, logging_level=logging_level, log_file=log_file)
- worker.work()
+ def run(cls, pool_size=5, queues=[], server='localhost:6379', password=None, interval=2,
+ logging_level=logging.INFO, log_file=None, minions_interval=5, concat_minions_logs=False, max_jobs=0):
+ worker = cls(pool_size=pool_size, queues=queues, server=server, password=password, logging_level=logging_level,
+ log_file=log_file, minions_interval=minions_interval, concat_minions_logs=concat_minions_logs,
+ max_jobs=max_jobs)
+ worker.work(interval=interval)
#if __name__ == "__main__":
# k = Khan()
diff --git a/pyres/job.py b/pyres/job.py
index be329fd..4f4b547 100644
--- a/pyres/job.py
+++ b/pyres/job.py
@@ -1,8 +1,10 @@
+import logging
+import time
from datetime import timedelta
from pyres import ResQ, safe_str_to_class
from pyres import failure
from pyres.failure.redis import RedisBackend
-
+from pyres.compat import string_types
class Job(object):
"""Every job on the ResQ is an instance of the *Job* class.
@@ -24,13 +26,15 @@ class Job(object):
"""
safe_str_to_class = staticmethod(safe_str_to_class)
-
+
def __init__(self, queue, payload, resq, worker=None):
self._queue = queue
self._payload = payload
self.resq = resq
self._worker = worker
+ self.enqueue_timestamp = self._payload.get("enqueue_timestamp")
+
# Set the default back end, jobs can override when we import them
# inside perform().
failure.backend = RedisBackend
@@ -43,7 +47,20 @@ def perform(self):
"""This method converts payload into args and calls the ``perform``
method on the payload class.
- #@ add entry_point loading
+ Before calling ``perform``, a ``before_perform`` class method
+ is called, if it exists. It takes a dictionary as an argument;
+ currently the only things stored on the dictionary are the
+ args passed into ``perform`` and a timestamp of when the job
+ was enqueued.
+
+ Similarly, an ``after_perform`` class method is called after
+ ``perform`` is finished. The metadata dictionary contains the
+ same data, plus a timestamp of when the job was performed, a
+ ``failed`` boolean value, and if it did fail, a ``retried``
+ boolean value. This method is called after retry, and is
+ called regardless of whether an exception is ultimately thrown
+ by the perform method.
+
"""
payload_class_str = self._payload["class"]
@@ -51,11 +68,35 @@ def perform(self):
payload_class.resq = self.resq
args = self._payload.get("args")
+ metadata = dict(args=args)
+ if self.enqueue_timestamp:
+ metadata["enqueue_timestamp"] = self.enqueue_timestamp
+
+ before_perform = getattr(payload_class, "before_perform", None)
+
+ metadata["failed"] = False
+ metadata["perform_timestamp"] = time.time()
+ check_after = True
try:
+ if before_perform:
+ payload_class.before_perform(metadata)
return payload_class.perform(*args)
- except:
+ except Exception as e:
+ metadata["failed"] = True
+ metadata["exception"] = e
if not self.retry(payload_class, args):
+ metadata["retried"] = False
raise
+ else:
+ metadata["retried"] = True
+ logging.exception("Retry scheduled after error in %s", self._payload)
+ finally:
+ after_perform = getattr(payload_class, "after_perform", None)
+
+ if after_perform:
+ payload_class.after_perform(metadata)
+
+ delattr(payload_class,'resq')
def fail(self, exception):
"""This method provides a way to fail a job and will use whatever
@@ -68,6 +109,11 @@ def fail(self, exception):
return fail
def retry(self, payload_class, args):
+ """This method provides a way to retry a job after a failure.
+        If the job class defined by the payload contains a ``retry_every`` attribute then pyres
+ will attempt to retry the job until successful or until timeout defined by ``retry_timeout`` on the payload class.
+
+ """
retry_every = getattr(payload_class, 'retry_every', None)
retry_timeout = getattr(payload_class, 'retry_timeout', 0)
@@ -88,7 +134,7 @@ def reserve(cls, queues, res, worker=None, timeout=10):
that other workers will not pick it up.
"""
- if isinstance(queues, basestring):
+ if isinstance(queues, string_types):
queues = [queues]
queue, payload = res.pop(queues, timeout=timeout)
if payload:
diff --git a/pyres/json_parser.py b/pyres/json_parser.py
index be80fb6..a8d18e7 100644
--- a/pyres/json_parser.py
+++ b/pyres/json_parser.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from pyres.compat import string_types
try:
#import simplejson as json
@@ -24,13 +25,13 @@ def decode(self, json_string):
return self.convert(decoded)
def convert(self, value):
- if isinstance(value, basestring) and value.startswith(DATE_PREFIX):
+ if isinstance(value, string_types) and value.startswith(DATE_PREFIX):
try:
return datetime.strptime(value[len(DATE_PREFIX):], DATE_FORMAT)
except ValueError:
return value
elif isinstance(value, dict):
- for k, v in value.iteritems():
+ for k, v in value.items():
new = self.convert(v)
if new != v:
value[k] = new
diff --git a/pyres/scheduler.py b/pyres/scheduler.py
index 37907e1..11e4f58 100644
--- a/pyres/scheduler.py
+++ b/pyres/scheduler.py
@@ -3,6 +3,7 @@
import logging
from pyres import ResQ, __version__
+from pyres.compat import string_types
logger = logging.getLogger(__name__)
@@ -14,7 +15,7 @@ def __init__(self, server="localhost:6379", password=None):
>>> scheduler = Scheduler('localhost:6379')
"""
self._shutdown = False
- if isinstance(server, basestring):
+ if isinstance(server, string_types):
self.resq = ResQ(server=server, password=password)
elif isinstance(server, ResQ):
self.resq = server
@@ -66,7 +67,7 @@ def next_item(self, timestamp):
def handle_delayed_items(self):
for timestamp in self.next_timestamp():
_setproctitle('Handling timestamp %s' % timestamp)
- logger.info('handling timestamp: %s' % timestamp)
+ logger.debug('handling timestamp: %s' % timestamp)
for item in self.next_item(timestamp):
logger.debug('queueing item %s' % item)
klass = item['class']
diff --git a/pyres/scripts.py b/pyres/scripts.py
index fe65fd4..ff2d466 100644
--- a/pyres/scripts.py
+++ b/pyres/scripts.py
@@ -2,12 +2,9 @@
from optparse import OptionParser
-from itty import run_itty
-
from pyres.horde import Khan
from pyres import setup_logging, setup_pidfile
from pyres.scheduler import Scheduler
-from resweb import server as resweb_server
from pyres.worker import Worker
@@ -16,12 +13,16 @@ def pyres_manager():
parser = OptionParser(usage=usage)
#parser.add_option("-q", dest="queue_list")
parser.add_option("--host", dest="host", default="localhost")
- parser.add_option("--port",dest="port",type="int", default=6379)
- parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs')
+ parser.add_option("--port", dest="port",type="int", default=6379)
+ parser.add_option("--password", dest="password", default=None)
+ parser.add_option("-i", '--interval', dest='manager_interval', default=None, help='the default time interval to sleep between runs - manager')
+ parser.add_option("--minions_interval", dest='minions_interval', default=None, help='the default time interval to sleep between runs - minions')
parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
parser.add_option("--pool", type="int", dest="pool_size", default=1, help="Number of minions to spawn under the manager.")
+ parser.add_option("-j", "--process_max_jobs", dest="max_jobs", type=int, default=0, help='how many jobs should be processed on worker run.')
parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.')
parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.')
+ parser.add_option("--concat_minions_logs", action="store_true", dest="concat_minions_logs", help='Concat all minions logs on same file.')
(options,args) = parser.parse_args()
if len(args) != 1:
@@ -30,16 +31,23 @@ def pyres_manager():
log_level = getattr(logging, options.log_level.upper(), 'INFO')
#logging.basicConfig(level=log_level, format="%(asctime)s: %(levelname)s: %(message)s")
-
+ concat_minions_logs = options.concat_minions_logs
setup_pidfile(options.pidfile)
- interval = options.interval
- if interval is not None:
- interval = float(interval)
+ manager_interval = options.manager_interval
+ if manager_interval is not None:
+ manager_interval = float(manager_interval)
+
+ minions_interval = options.minions_interval
+ if minions_interval is not None:
+ minions_interval = float(minions_interval)
queues = args[0].split(',')
server = '%s:%s' % (options.host,options.port)
- Khan.run(pool_size=options.pool_size, queues=queues, server=server, logging_level=log_level, log_file=options.logfile)
+ password = options.password
+ Khan.run(pool_size=options.pool_size, queues=queues, server=server, password=password, interval=manager_interval,
+ logging_level=log_level, log_file=options.logfile, minions_interval=minions_interval,
+ concat_minions_logs=concat_minions_logs, max_jobs=options.max_jobs)
def pyres_scheduler():
@@ -47,7 +55,8 @@ def pyres_scheduler():
parser = OptionParser(usage=usage)
#parser.add_option("-q", dest="queue_list")
parser.add_option("--host", dest="host", default="localhost")
- parser.add_option("--port",dest="port",type="int", default=6379)
+ parser.add_option("--port", dest="port",type="int", default=6379)
+ parser.add_option("--password", dest="password", default=None)
parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.')
parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.')
@@ -57,43 +66,8 @@ def pyres_scheduler():
setup_logging(procname="pyres_scheduler", log_level=log_level, filename=options.logfile)
setup_pidfile(options.pidfile)
server = '%s:%s' % (options.host, options.port)
- Scheduler.run(server)
-
-
-def pyres_web():
- usage = "usage: %prog [options]"
- parser = OptionParser(usage)
- parser.add_option("--host",
- dest="host",
- default="localhost",
- metavar="HOST")
- parser.add_option("--port",
- dest="port",
- type="int",
- default=8080)
- parser.add_option("--dsn",
- dest="dsn",
- help="Redis server to display")
- parser.add_option("--auth",
- dest="auth",
- help="Redis user:pass")
- parser.add_option("--server",
- dest="server",
- help="Server for itty to run under.",
- default='wsgiref')
- (options,args) = parser.parse_args()
-
- if options.dsn:
- from pyres import ResQ
- if options.auth is not None:
- from redis import Redis
- rhost, rport = options.dsn.split(':')
- ruser, rpass = options.auth.split(':')
- redis = Redis(host=rhost, port=int(rport), db=ruser, password=rpass)
- resweb_server.HOST = ResQ(redis)
- else:
- resweb_server.HOST = ResQ(options.dsn)
- run_itty(host=options.host, port=options.port, server=options.server)
+ password = options.password
+ Scheduler.run(server, password)
def pyres_worker():
@@ -101,11 +75,13 @@ def pyres_worker():
parser = OptionParser(usage=usage)
parser.add_option("--host", dest="host", default="localhost")
- parser.add_option("--port",dest="port",type="int", default=6379)
+ parser.add_option("--port", dest="port",type="int", default=6379)
+ parser.add_option("--password", dest="password", default=None)
parser.add_option("-i", '--interval', dest='interval', default=None, help='the default time interval to sleep between runs')
parser.add_option('-l', '--log-level', dest='log_level', default='info', help='log level. Valid values are "debug", "info", "warning", "error", "critical", in decreasing order of verbosity. Defaults to "info" if parameter not specified.')
parser.add_option('-f', dest='logfile', help='If present, a logfile will be used. "stderr", "stdout", and "syslog" are all special values.')
parser.add_option('-p', dest='pidfile', help='If present, a pidfile will be used.')
+ parser.add_option("-t", '--timeout', dest='timeout', default=None, help='the timeout in seconds for this worker')
(options,args) = parser.parse_args()
if len(args) != 1:
@@ -120,6 +96,9 @@ def pyres_worker():
if interval is not None:
interval = int(interval)
+ timeout = options.timeout and int(options.timeout)
+
queues = args[0].split(',')
server = '%s:%s' % (options.host,options.port)
- Worker.run(queues, server, interval)
+ password = options.password
+ Worker.run(queues, server, password, interval, timeout=timeout)
diff --git a/pyres/utils.py b/pyres/utils.py
deleted file mode 100644
index d640c11..0000000
--- a/pyres/utils.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from UserDict import DictMixin
-
-class OrderedDict(dict, DictMixin):
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__end
- except AttributeError:
- self.clear()
- self.update(*args, **kwds)
-
- def clear(self):
- self.__end = end = []
- end += [None, end, end] # sentinel node for doubly linked list
- self.__map = {} # key --> [key, prev, next]
- dict.clear(self)
-
- def __setitem__(self, key, value):
- if key not in self:
- end = self.__end
- curr = end[1]
- curr[2] = end[1] = self.__map[key] = [key, curr, end]
- dict.__setitem__(self, key, value)
-
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- key, prev, next = self.__map.pop(key)
- prev[2] = next
- next[1] = prev
-
- def __iter__(self):
- end = self.__end
- curr = end[2]
- while curr is not end:
- yield curr[0]
- curr = curr[2]
-
- def __reversed__(self):
- end = self.__end
- curr = end[1]
- while curr is not end:
- yield curr[0]
- curr = curr[1]
-
- def popitem(self, last=True):
- if not self:
- raise KeyError('dictionary is empty')
- key = reversed(self).next() if last else iter(self).next()
- value = self.pop(key)
- return key, value
-
- def __reduce__(self):
- items = [[k, self[k]] for k in self]
- tmp = self.__map, self.__end
- del self.__map, self.__end
- inst_dict = vars(self).copy()
- self.__map, self.__end = tmp
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def keys(self):
- return list(self)
-
- setdefault = DictMixin.setdefault
- update = DictMixin.update
- pop = DictMixin.pop
- values = DictMixin.values
- items = DictMixin.items
- iterkeys = DictMixin.iterkeys
- itervalues = DictMixin.itervalues
- iteritems = DictMixin.iteritems
-
- def __repr__(self):
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
-
- def copy(self):
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- if isinstance(other, OrderedDict):
- return len(self)==len(other) and \
- all(p==q for p, q in zip(self.items(), other.items()))
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
diff --git a/pyres/worker.py b/pyres/worker.py
index 8a9f2b1..fc42b12 100644
--- a/pyres/worker.py
+++ b/pyres/worker.py
@@ -2,14 +2,14 @@
import signal
import datetime, time
import os, sys
-import json_parser as json
-import commands
+from pyres import json_parser as json
+from pyres.compat import commands
import random
-from pyres.exceptions import NoQueueError
+from pyres.exceptions import NoQueueError, JobError, TimeoutError, CrashError
from pyres.job import Job
from pyres import ResQ, Stat, __version__
-
+from pyres.compat import string_types
logger = logging.getLogger(__name__)
@@ -19,21 +19,22 @@ class Worker(object):
class and passes a comma-separated list of queues to listen on.::
>>> from pyres.worker import Worker
- >>> Worker.run([queue1, queue2], server="localhost:6379")
+ >>> Worker.run([queue1, queue2], server="localhost:6379/0")
"""
-
+
job_class = Job
-
- def __init__(self, queues=(), server="localhost:6379", password=None):
+
+ def __init__(self, queues=(), server="localhost:6379", password=None, timeout=None):
self.queues = queues
self.validate_queues()
self._shutdown = False
self.child = None
self.pid = os.getpid()
self.hostname = os.uname()[1]
+ self.timeout = timeout
- if isinstance(server, basestring):
+ if isinstance(server, string_types):
self.resq = ResQ(server=server, password=password)
elif isinstance(server, ResQ):
self.resq = server
@@ -74,7 +75,7 @@ def unregister_worker(self):
def prune_dead_workers(self):
all_workers = Worker.all(self.resq)
- known_workers = self.worker_pids()
+ known_workers = Worker.worker_pids()
for worker in all_workers:
host, pid, queues = worker.id.split(':')
if host != self.hostname:
@@ -127,10 +128,9 @@ def work(self, interval=5):
that job to make sure another worker won't run it, then *forks* itself to
work on that job.
- Finally, the ``process`` method actually processes the job by eventually calling the Job instance's ``perform`` method.
-
"""
self._setproctitle("Starting")
+ logger.info("starting")
self.startup()
while True:
@@ -138,45 +138,12 @@ def work(self, interval=5):
logger.info('shutdown scheduled')
break
+ self.register_worker()
+
job = self.reserve(interval)
if job:
- logger.debug('picked up job')
- logger.debug('job details: %s' % job)
- self.before_fork(job)
- self.child = os.fork()
- if self.child:
- self._setproctitle("Forked %s at %s" %
- (self.child,
- datetime.datetime.now()))
- logger.info('Forked %s at %s' % (self.child,
- datetime.datetime.now()))
-
- try:
- os.waitpid(self.child, 0)
- except OSError as ose:
- import errno
-
- if ose.errno != errno.EINTR:
- raise ose
- #os.wait()
- logger.debug('done waiting')
- else:
- self._setproctitle("Processing %s since %s" %
- (job._queue,
- datetime.datetime.now()))
- logger.info('Processing %s since %s' %
- (job._queue, datetime.datetime.now()))
- self.after_fork(job)
-
- # re-seed the Python PRNG after forking, otherwise
- # all job process will share the same sequence of
- # random numbers
- random.seed()
-
- self.process(job)
- os._exit(0)
- self.child = None
+ self.fork_worker(job)
else:
if interval == 0:
break
@@ -185,6 +152,81 @@ def work(self, interval=5):
#time.sleep(interval)
self.unregister_worker()
+ def fork_worker(self, job):
+ """Invoked by ``work`` method. ``fork_worker`` does the actual forking to create the child
+ process that will process the job. It's also responsible for monitoring the child process
+ and handling hangs and crashes.
+
+ Finally, the ``process`` method actually processes the job by eventually calling the Job
+ instance's ``perform`` method.
+
+ """
+ logger.debug('picked up job')
+ logger.debug('job details: %s' % job)
+ self.before_fork(job)
+ self.child = os.fork()
+ if self.child:
+ self._setproctitle("Forked %s at %s" %
+ (self.child,
+ datetime.datetime.now()))
+ logger.info('Forked %s at %s' % (self.child,
+ datetime.datetime.now()))
+
+ try:
+ start = datetime.datetime.now()
+
+ # waits for the result or times out
+ while True:
+ pid, status = os.waitpid(self.child, os.WNOHANG)
+ if pid != 0:
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0:
+ break
+ if os.WIFSTOPPED(status):
+ logger.warning("Process stopped by signal %d" % os.WSTOPSIG(status))
+ else:
+ if os.WIFSIGNALED(status):
+ raise CrashError("Unexpected exit by signal %d" % os.WTERMSIG(status))
+ raise CrashError("Unexpected exit status %d" % os.WEXITSTATUS(status))
+
+ time.sleep(0.5)
+
+ now = datetime.datetime.now()
+ if self.timeout and ((now - start).seconds > self.timeout):
+ os.kill(self.child, signal.SIGKILL)
+ os.waitpid(-1, os.WNOHANG)
+ raise TimeoutError("Timed out after %d seconds" % self.timeout)
+
+ except OSError as ose:
+ import errno
+
+ if ose.errno != errno.EINTR:
+ raise ose
+ except JobError:
+ self._handle_job_exception(job)
+ finally:
+ # If the child process' job called os._exit manually we need to
+ # finish the clean up here.
+ if self.job():
+ self.done_working(job)
+
+ logger.debug('done waiting')
+ else:
+ self._setproctitle("Processing %s since %s" %
+ (job,
+ datetime.datetime.now()))
+ logger.info('Processing %s since %s' %
+ (job, datetime.datetime.now()))
+ self.after_fork(job)
+
+ # re-seed the Python PRNG after forking, otherwise
+                # all job processes will share the same sequence of
+ # random numbers
+ random.seed()
+
+ self.process(job)
+ os._exit(0)
+ self.child = None
+
def before_fork(self, job):
"""
hook for making changes immediately before forking to process
@@ -205,26 +247,38 @@ def before_process(self, job):
def process(self, job=None):
if not job:
job = self.reserve()
+
+ job_failed = False
try:
- self.working_on(job)
- job = self.before_process(job)
- return job.perform()
- except Exception, e:
- exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
- logger.exception("%s failed: %s" % (job, e))
- job.fail(exceptionTraceback)
- self.failed()
- else:
- logger.info('completed job')
- logger.debug('job details: %s' % job)
+ try:
+ self.working_on(job)
+ job = self.before_process(job)
+ return job.perform()
+ except Exception:
+ job_failed = True
+ self._handle_job_exception(job)
+ except SystemExit as e:
+ if e.code != 0:
+ job_failed = True
+ self._handle_job_exception(job)
+
+ if not job_failed:
+ logger.debug('completed job')
+ logger.debug('job details: %s' % job)
finally:
- self.done_working()
+ self.done_working(job)
+
+ def _handle_job_exception(self, job):
+ exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
+ logger.exception("%s failed: %s" % (job, exceptionValue))
+ job.fail(exceptionTraceback)
+ self.failed()
def reserve(self, timeout=10):
logger.debug('checking queues %s' % self.queues)
job = self.job_class.reserve(self.queues, self.resq, self.__str__(), timeout=timeout)
if job:
- logger.info('Found job on %s' % job._queue)
+ logger.info('Found job on %s: %s' % (job._queue, job))
return job
def working_on(self, job):
@@ -239,8 +293,8 @@ def working_on(self, job):
logger.debug("worker:%s" % str(self))
logger.debug(self.resq.redis["resque:worker:%s" % str(self)])
- def done_working(self):
- logger.info('done working')
+ def done_working(self, job):
+ logger.debug('done working on %s', job)
self.processed()
self.resq.redis.delete("resque:worker:%s" % str(self))
@@ -275,16 +329,20 @@ def state(self):
return 'working'
return 'idle'
- def worker_pids(self):
+ @classmethod
+ def worker_pids(cls):
"""Returns an array of all pids (as strings) of the workers on
this machine. Used when pruning dead workers."""
- return map(lambda l: l.strip().split(' ')[0],
- commands.getoutput("ps -A -o pid,command | \
- grep pyres_worker").split("\n"))
+ cmd = "ps -A -o pid,command | grep pyres_worker | grep -v grep"
+ output = commands.getoutput(cmd)
+ if output:
+ return map(lambda l: l.strip().split(' ')[0], output.split("\n"))
+ else:
+ return []
@classmethod
- def run(cls, queues, server="localhost:6379", interval=None):
- worker = cls(queues=queues, server=server)
+ def run(cls, queues, server="localhost:6379", password=None, interval=None, timeout=None):
+ worker = cls(queues=queues, server=server, password=password, timeout=timeout)
if interval is not None:
worker.work(interval)
else:
@@ -292,16 +350,16 @@ def run(cls, queues, server="localhost:6379", interval=None):
@classmethod
def all(cls, host="localhost:6379"):
- if isinstance(host,basestring):
+ if isinstance(host,string_types):
resq = ResQ(host)
elif isinstance(host, ResQ):
resq = host
- return [Worker.find(w,resq) for w in resq.redis.smembers('resque:workers') or []]
+ return [Worker.find(w,resq) for w in resq.workers() or []]
@classmethod
def working(cls, host):
- if isinstance(host, basestring):
+ if isinstance(host, string_types):
resq = ResQ(host)
elif isinstance(host, ResQ):
resq = host
diff --git a/requirements-test.txt b/requirements-test.txt
new file mode 100644
index 0000000..866dd41
--- /dev/null
+++ b/requirements-test.txt
@@ -0,0 +1 @@
+nose==1.1.2
diff --git a/requirements.txt b/requirements.txt
index 1a7b5c6..96046b5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,3 @@
-simplejson==2.0.9
-itty==0.6.2
-redis>=1.34.1
-pystache==0.1.0
-setproctitle>=1.0
+simplejson>3.0
+redis>2.4.12
+setproctitle>1.0
diff --git a/resweb/__init__.py b/resweb/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/resweb/media/idle.png b/resweb/media/idle.png
deleted file mode 100755
index 50ffda6..0000000
Binary files a/resweb/media/idle.png and /dev/null differ
diff --git a/resweb/media/jquery-1.3.2.min.js b/resweb/media/jquery-1.3.2.min.js
deleted file mode 100644
index b1ae21d..0000000
--- a/resweb/media/jquery-1.3.2.min.js
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * jQuery JavaScript Library v1.3.2
- * http://jquery.com/
- *
- * Copyright (c) 2009 John Resig
- * Dual licensed under the MIT and GPL licenses.
- * http://docs.jquery.com/License
- *
- * Date: 2009-02-19 17:34:21 -0500 (Thu, 19 Feb 2009)
- * Revision: 6246
- */
-(function(){var l=this,g,y=l.jQuery,p=l.$,o=l.jQuery=l.$=function(E,F){return new o.fn.init(E,F)},D=/^[^<]*(<(.|\s)+>)[^>]*$|^#([\w-]+)$/,f=/^.[^:#\[\.,]*$/;o.fn=o.prototype={init:function(E,H){E=E||document;if(E.nodeType){this[0]=E;this.length=1;this.context=E;return this}if(typeof E==="string"){var G=D.exec(E);if(G&&(G[1]||!H)){if(G[1]){E=o.clean([G[1]],H)}else{var I=document.getElementById(G[3]);if(I&&I.id!=G[3]){return o().find(E)}var F=o(I||[]);F.context=document;F.selector=E;return F}}else{return o(H).find(E)}}else{if(o.isFunction(E)){return o(document).ready(E)}}if(E.selector&&E.context){this.selector=E.selector;this.context=E.context}return this.setArray(o.isArray(E)?E:o.makeArray(E))},selector:"",jquery:"1.3.2",size:function(){return this.length},get:function(E){return E===g?Array.prototype.slice.call(this):this[E]},pushStack:function(F,H,E){var G=o(F);G.prevObject=this;G.context=this.context;if(H==="find"){G.selector=this.selector+(this.selector?" ":"")+E}else{if(H){G.selector=this.selector+"."+H+"("+E+")"}}return G},setArray:function(E){this.length=0;Array.prototype.push.apply(this,E);return this},each:function(F,E){return o.each(this,F,E)},index:function(E){return o.inArray(E&&E.jquery?E[0]:E,this)},attr:function(F,H,G){var E=F;if(typeof F==="string"){if(H===g){return this[0]&&o[G||"attr"](this[0],F)}else{E={};E[F]=H}}return this.each(function(I){for(F in E){o.attr(G?this.style:this,F,o.prop(this,E[F],G,I,F))}})},css:function(E,F){if((E=="width"||E=="height")&&parseFloat(F)<0){F=g}return this.attr(E,F,"curCSS")},text:function(F){if(typeof F!=="object"&&F!=null){return this.empty().append((this[0]&&this[0].ownerDocument||document).createTextNode(F))}var E="";o.each(F||this,function(){o.each(this.childNodes,function(){if(this.nodeType!=8){E+=this.nodeType!=1?this.nodeValue:o.fn.text([this])}})});return E},wrapAll:function(E){if(this[0]){var F=o(E,this[0].ownerDocument).clone();if(this[0].parentNode){F.insertBefore(this[0])}F.map(function(){var 
G=this;while(G.firstChild){G=G.firstChild}return G}).append(this)}return this},wrapInner:function(E){return this.each(function(){o(this).contents().wrapAll(E)})},wrap:function(E){return this.each(function(){o(this).wrapAll(E)})},append:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.appendChild(E)}})},prepend:function(){return this.domManip(arguments,true,function(E){if(this.nodeType==1){this.insertBefore(E,this.firstChild)}})},before:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this)})},after:function(){return this.domManip(arguments,false,function(E){this.parentNode.insertBefore(E,this.nextSibling)})},end:function(){return this.prevObject||o([])},push:[].push,sort:[].sort,splice:[].splice,find:function(E){if(this.length===1){var F=this.pushStack([],"find",E);F.length=0;o.find(E,this[0],F);return F}else{return this.pushStack(o.unique(o.map(this,function(G){return o.find(E,G)})),"find",E)}},clone:function(G){var E=this.map(function(){if(!o.support.noCloneEvent&&!o.isXMLDoc(this)){var I=this.outerHTML;if(!I){var J=this.ownerDocument.createElement("div");J.appendChild(this.cloneNode(true));I=J.innerHTML}return o.clean([I.replace(/ jQuery\d+="(?:\d+|null)"/g,"").replace(/^\s*/,"")])[0]}else{return this.cloneNode(true)}});if(G===true){var H=this.find("*").andSelf(),F=0;E.find("*").andSelf().each(function(){if(this.nodeName!==H[F].nodeName){return}var I=o.data(H[F],"events");for(var K in I){for(var J in I[K]){o.event.add(this,K,I[K][J],I[K][J].data)}}F++})}return E},filter:function(E){return this.pushStack(o.isFunction(E)&&o.grep(this,function(G,F){return E.call(G,F)})||o.multiFilter(E,o.grep(this,function(F){return F.nodeType===1})),"filter",E)},closest:function(E){var G=o.expr.match.POS.test(E)?o(E):null,F=0;return this.map(function(){var H=this;while(H&&H.ownerDocument){if(G?G.index(H)>-1:o(H).is(E)){o.data(H,"closest",F);return H}H=H.parentNode;F++}})},not:function(E){if(typeof 
E==="string"){if(f.test(E)){return this.pushStack(o.multiFilter(E,this,true),"not",E)}else{E=o.multiFilter(E,this)}}var F=E.length&&E[E.length-1]!==g&&!E.nodeType;return this.filter(function(){return F?o.inArray(this,E)<0:this!=E})},add:function(E){return this.pushStack(o.unique(o.merge(this.get(),typeof E==="string"?o(E):o.makeArray(E))))},is:function(E){return !!E&&o.multiFilter(E,this).length>0},hasClass:function(E){return !!E&&this.is("."+E)},val:function(K){if(K===g){var E=this[0];if(E){if(o.nodeName(E,"option")){return(E.attributes.value||{}).specified?E.value:E.text}if(o.nodeName(E,"select")){var I=E.selectedIndex,L=[],M=E.options,H=E.type=="select-one";if(I<0){return null}for(var F=H?I:0,J=H?I+1:M.length;F=0||o.inArray(this.name,K)>=0)}else{if(o.nodeName(this,"select")){var N=o.makeArray(K);o("option",this).each(function(){this.selected=(o.inArray(this.value,N)>=0||o.inArray(this.text,N)>=0)});if(!N.length){this.selectedIndex=-1}}else{this.value=K}}})},html:function(E){return E===g?(this[0]?this[0].innerHTML.replace(/ jQuery\d+="(?:\d+|null)"/g,""):null):this.empty().append(E)},replaceWith:function(E){return this.after(E).remove()},eq:function(E){return this.slice(E,+E+1)},slice:function(){return this.pushStack(Array.prototype.slice.apply(this,arguments),"slice",Array.prototype.slice.call(arguments).join(","))},map:function(E){return this.pushStack(o.map(this,function(G,F){return E.call(G,F,G)}))},andSelf:function(){return this.add(this.prevObject)},domManip:function(J,M,L){if(this[0]){var I=(this[0].ownerDocument||this[0]).createDocumentFragment(),F=o.clean(J,(this[0].ownerDocument||this[0]),I),H=I.firstChild;if(H){for(var G=0,E=this.length;G1||G>0?I.cloneNode(true):I)}}if(F){o.each(F,z)}}return this;function K(N,O){return M&&o.nodeName(N,"table")&&o.nodeName(O,"tr")?(N.getElementsByTagName("tbody")[0]||N.appendChild(N.ownerDocument.createElement("tbody"))):N}}};o.fn.init.prototype=o.fn;function 
z(E,F){if(F.src){o.ajax({url:F.src,async:false,dataType:"script"})}else{o.globalEval(F.text||F.textContent||F.innerHTML||"")}if(F.parentNode){F.parentNode.removeChild(F)}}function e(){return +new Date}o.extend=o.fn.extend=function(){var J=arguments[0]||{},H=1,I=arguments.length,E=false,G;if(typeof J==="boolean"){E=J;J=arguments[1]||{};H=2}if(typeof J!=="object"&&!o.isFunction(J)){J={}}if(I==H){J=this;--H}for(;H-1}},swap:function(H,G,I){var E={};for(var F in G){E[F]=H.style[F];H.style[F]=G[F]}I.call(H);for(var F in G){H.style[F]=E[F]}},css:function(H,F,J,E){if(F=="width"||F=="height"){var L,G={position:"absolute",visibility:"hidden",display:"block"},K=F=="width"?["Left","Right"]:["Top","Bottom"];function I(){L=F=="width"?H.offsetWidth:H.offsetHeight;if(E==="border"){return}o.each(K,function(){if(!E){L-=parseFloat(o.curCSS(H,"padding"+this,true))||0}if(E==="margin"){L+=parseFloat(o.curCSS(H,"margin"+this,true))||0}else{L-=parseFloat(o.curCSS(H,"border"+this+"Width",true))||0}})}if(H.offsetWidth!==0){I()}else{o.swap(H,G,I)}return Math.max(0,Math.round(L))}return o.curCSS(H,F,J)},curCSS:function(I,F,G){var L,E=I.style;if(F=="opacity"&&!o.support.opacity){L=o.attr(E,"opacity");return L==""?"1":L}if(F.match(/float/i)){F=w}if(!G&&E&&E[F]){L=E[F]}else{if(q.getComputedStyle){if(F.match(/float/i)){F="float"}F=F.replace(/([A-Z])/g,"-$1").toLowerCase();var M=q.getComputedStyle(I,null);if(M){L=M.getPropertyValue(F)}if(F=="opacity"&&L==""){L="1"}}else{if(I.currentStyle){var J=F.replace(/\-(\w)/g,function(N,O){return O.toUpperCase()});L=I.currentStyle[F]||I.currentStyle[J];if(!/^\d+(px)?$/i.test(L)&&/^\d/.test(L)){var H=E.left,K=I.runtimeStyle.left;I.runtimeStyle.left=I.currentStyle.left;E.left=L||0;L=E.pixelLeft+"px";E.left=H;I.runtimeStyle.left=K}}}}return L},clean:function(F,K,I){K=K||document;if(typeof K.createElement==="undefined"){K=K.ownerDocument||K[0]&&K[0].ownerDocument||document}if(!I&&F.length===1&&typeof F[0]==="string"){var 
H=/^<(\w+)\s*\/?>$/.exec(F[0]);if(H){return[K.createElement(H[1])]}}var G=[],E=[],L=K.createElement("div");o.each(F,function(P,S){if(typeof S==="number"){S+=""}if(!S){return}if(typeof S==="string"){S=S.replace(/(<(\w+)[^>]*?)\/>/g,function(U,V,T){return T.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i)?U:V+">"+T+">"});var O=S.replace(/^\s+/,"").substring(0,10).toLowerCase();var Q=!O.indexOf("",""]||!O.indexOf("",""]||O.match(/^<(thead|tbody|tfoot|colg|cap)/)&&[1,""]||!O.indexOf(""," "]||(!O.indexOf(""," "]||!O.indexOf(""," "]||!o.support.htmlSerialize&&[1,"div","
"]||[0,"",""];L.innerHTML=Q[1]+S+Q[2];while(Q[0]--){L=L.lastChild}if(!o.support.tbody){var R=/"&&!R?L.childNodes:[];for(var M=N.length-1;M>=0;--M){if(o.nodeName(N[M],"tbody")&&!N[M].childNodes.length){N[M].parentNode.removeChild(N[M])}}}if(!o.support.leadingWhitespace&&/^\s/.test(S)){L.insertBefore(K.createTextNode(S.match(/^\s*/)[0]),L.firstChild)}S=o.makeArray(L.childNodes)}if(S.nodeType){G.push(S)}else{G=o.merge(G,S)}});if(I){for(var J=0;G[J];J++){if(o.nodeName(G[J],"script")&&(!G[J].type||G[J].type.toLowerCase()==="text/javascript")){E.push(G[J].parentNode?G[J].parentNode.removeChild(G[J]):G[J])}else{if(G[J].nodeType===1){G.splice.apply(G,[J+1,0].concat(o.makeArray(G[J].getElementsByTagName("script"))))}I.appendChild(G[J])}}return E}return G},attr:function(J,G,K){if(!J||J.nodeType==3||J.nodeType==8){return g}var H=!o.isXMLDoc(J),L=K!==g;G=H&&o.props[G]||G;if(J.tagName){var F=/href|src|style/.test(G);if(G=="selected"&&J.parentNode){J.parentNode.selectedIndex}if(G in J&&H&&!F){if(L){if(G=="type"&&o.nodeName(J,"input")&&J.parentNode){throw"type property can't be changed"}J[G]=K}if(o.nodeName(J,"form")&&J.getAttributeNode(G)){return J.getAttributeNode(G).nodeValue}if(G=="tabIndex"){var I=J.getAttributeNode("tabIndex");return I&&I.specified?I.value:J.nodeName.match(/(button|input|object|select|textarea)/i)?0:J.nodeName.match(/^(a|area)$/i)&&J.href?0:g}return J[G]}if(!o.support.style&&H&&G=="style"){return o.attr(J.style,"cssText",K)}if(L){J.setAttribute(G,""+K)}var E=!o.support.hrefNormalized&&H&&F?J.getAttribute(G,2):J.getAttribute(G);return E===null?g:E}if(!o.support.opacity&&G=="opacity"){if(L){J.zoom=1;J.filter=(J.filter||"").replace(/alpha\([^)]*\)/,"")+(parseInt(K)+""=="NaN"?"":"alpha(opacity="+K*100+")")}return J.filter&&J.filter.indexOf("opacity=")>=0?(parseFloat(J.filter.match(/opacity=([^)]*)/)[1])/100)+"":""}G=G.replace(/-([a-z])/ig,function(M,N){return N.toUpperCase()});if(L){J[G]=K}return 
J[G]},trim:function(E){return(E||"").replace(/^\s+|\s+$/g,"")},makeArray:function(G){var E=[];if(G!=null){var F=G.length;if(F==null||typeof G==="string"||o.isFunction(G)||G.setInterval){E[0]=G}else{while(F){E[--F]=G[F]}}}return E},inArray:function(G,H){for(var E=0,F=H.length;E0?this.clone(true):this).get();o.fn[F].apply(o(L[K]),I);J=J.concat(I)}return this.pushStack(J,E,G)}});o.each({removeAttr:function(E){o.attr(this,E,"");if(this.nodeType==1){this.removeAttribute(E)}},addClass:function(E){o.className.add(this,E)},removeClass:function(E){o.className.remove(this,E)},toggleClass:function(F,E){if(typeof E!=="boolean"){E=!o.className.has(this,F)}o.className[E?"add":"remove"](this,F)},remove:function(E){if(!E||o.filter(E,[this]).length){o("*",this).add([this]).each(function(){o.event.remove(this);o.removeData(this)});if(this.parentNode){this.parentNode.removeChild(this)}}},empty:function(){o(this).children().remove();while(this.firstChild){this.removeChild(this.firstChild)}}},function(E,F){o.fn[E]=function(){return this.each(F,arguments)}});function j(E,F){return E[0]&&parseInt(o.curCSS(E[0],F,true),10)||0}var h="jQuery"+e(),v=0,A={};o.extend({cache:{},data:function(F,E,G){F=F==l?A:F;var H=F[h];if(!H){H=F[h]=++v}if(E&&!o.cache[H]){o.cache[H]={}}if(G!==g){o.cache[H][E]=G}return E?o.cache[H][E]:H},removeData:function(F,E){F=F==l?A:F;var H=F[h];if(E){if(o.cache[H]){delete o.cache[H][E];E="";for(E in o.cache[H]){break}if(!E){o.removeData(F)}}}else{try{delete F[h]}catch(G){if(F.removeAttribute){F.removeAttribute(h)}}delete o.cache[H]}},queue:function(F,E,H){if(F){E=(E||"fx")+"queue";var G=o.data(F,E);if(!G||o.isArray(H)){G=o.data(F,E,o.makeArray(H))}else{if(H){G.push(H)}}}return G},dequeue:function(H,G){var E=o.queue(H,G),F=E.shift();if(!G||G==="fx"){F=E[0]}if(F!==g){F.call(H)}}});o.fn.extend({data:function(E,G){var H=E.split(".");H[1]=H[1]?"."+H[1]:"";if(G===g){var F=this.triggerHandler("getData"+H[1]+"!",[H[0]]);if(F===g&&this.length){F=o.data(this[0],E)}return 
F===g&&H[1]?this.data(H[0]):F}else{return this.trigger("setData"+H[1]+"!",[H[0],G]).each(function(){o.data(this,E,G)})}},removeData:function(E){return this.each(function(){o.removeData(this,E)})},queue:function(E,F){if(typeof E!=="string"){F=E;E="fx"}if(F===g){return o.queue(this[0],E)}return this.each(function(){var G=o.queue(this,E,F);if(E=="fx"&&G.length==1){G[0].call(this)}})},dequeue:function(E){return this.each(function(){o.dequeue(this,E)})}});
-/*
- * Sizzle CSS Selector Engine - v0.9.3
- * Copyright 2009, The Dojo Foundation
- * Released under the MIT, BSD, and GPL Licenses.
- * More information: http://sizzlejs.com/
- */
-(function(){var R=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?/g,L=0,H=Object.prototype.toString;var F=function(Y,U,ab,ac){ab=ab||[];U=U||document;if(U.nodeType!==1&&U.nodeType!==9){return[]}if(!Y||typeof Y!=="string"){return ab}var Z=[],W,af,ai,T,ad,V,X=true;R.lastIndex=0;while((W=R.exec(Y))!==null){Z.push(W[1]);if(W[2]){V=RegExp.rightContext;break}}if(Z.length>1&&M.exec(Y)){if(Z.length===2&&I.relative[Z[0]]){af=J(Z[0]+Z[1],U)}else{af=I.relative[Z[0]]?[U]:F(Z.shift(),U);while(Z.length){Y=Z.shift();if(I.relative[Y]){Y+=Z.shift()}af=J(Y,af)}}}else{var ae=ac?{expr:Z.pop(),set:E(ac)}:F.find(Z.pop(),Z.length===1&&U.parentNode?U.parentNode:U,Q(U));af=F.filter(ae.expr,ae.set);if(Z.length>0){ai=E(af)}else{X=false}while(Z.length){var ah=Z.pop(),ag=ah;if(!I.relative[ah]){ah=""}else{ag=Z.pop()}if(ag==null){ag=U}I.relative[ah](ai,ag,Q(U))}}if(!ai){ai=af}if(!ai){throw"Syntax error, unrecognized expression: "+(ah||Y)}if(H.call(ai)==="[object Array]"){if(!X){ab.push.apply(ab,ai)}else{if(U.nodeType===1){for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&(ai[aa]===true||ai[aa].nodeType===1&&K(U,ai[aa]))){ab.push(af[aa])}}}else{for(var aa=0;ai[aa]!=null;aa++){if(ai[aa]&&ai[aa].nodeType===1){ab.push(af[aa])}}}}}else{E(ai,ab)}if(V){F(V,U,ab,ac);if(G){hasDuplicate=false;ab.sort(G);if(hasDuplicate){for(var aa=1;aa":function(Z,U,aa){var X=typeof U==="string";if(X&&!/\W/.test(U)){U=aa?U:U.toUpperCase();for(var V=0,T=Z.length;V=0)){if(!V){T.push(Y)}}else{if(V){U[X]=false}}}}return false},ID:function(T){return T[1].replace(/\\/g,"")},TAG:function(U,T){for(var V=0;T[V]===false;V++){}return T[V]&&Q(T[V])?U[1]:U[1].toUpperCase()},CHILD:function(T){if(T[1]=="nth"){var U=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(T[2]=="even"&&"2n"||T[2]=="odd"&&"2n+1"||!/\D/.test(T[2])&&"0n+"+T[2]||T[2]);T[2]=(U[1]+(U[2]||1))-0;T[3]=U[3]-0}T[0]=L++;return T},ATTR:function(X,U,V,T,Y,Z){var 
W=X[1].replace(/\\/g,"");if(!Z&&I.attrMap[W]){X[1]=I.attrMap[W]}if(X[2]==="~="){X[4]=" "+X[4]+" "}return X},PSEUDO:function(X,U,V,T,Y){if(X[1]==="not"){if(X[3].match(R).length>1||/^\w/.test(X[3])){X[3]=F(X[3],null,null,U)}else{var W=F.filter(X[3],U,V,true^Y);if(!V){T.push.apply(T,W)}return false}}else{if(I.match.POS.test(X[0])||I.match.CHILD.test(X[0])){return true}}return X},POS:function(T){T.unshift(true);return T}},filters:{enabled:function(T){return T.disabled===false&&T.type!=="hidden"},disabled:function(T){return T.disabled===true},checked:function(T){return T.checked===true},selected:function(T){T.parentNode.selectedIndex;return T.selected===true},parent:function(T){return !!T.firstChild},empty:function(T){return !T.firstChild},has:function(V,U,T){return !!F(T[3],V).length},header:function(T){return/h\d/i.test(T.nodeName)},text:function(T){return"text"===T.type},radio:function(T){return"radio"===T.type},checkbox:function(T){return"checkbox"===T.type},file:function(T){return"file"===T.type},password:function(T){return"password"===T.type},submit:function(T){return"submit"===T.type},image:function(T){return"image"===T.type},reset:function(T){return"reset"===T.type},button:function(T){return"button"===T.type||T.nodeName.toUpperCase()==="BUTTON"},input:function(T){return/input|select|textarea|button/i.test(T.nodeName)}},setFilters:{first:function(U,T){return T===0},last:function(V,U,T,W){return U===W.length-1},even:function(U,T){return T%2===0},odd:function(U,T){return T%2===1},lt:function(V,U,T){return UT[3]-0},nth:function(V,U,T){return T[3]-0==U},eq:function(V,U,T){return T[3]-0==U}},filter:{PSEUDO:function(Z,V,W,aa){var U=V[1],X=I.filters[U];if(X){return X(Z,W,V,aa)}else{if(U==="contains"){return(Z.textContent||Z.innerText||"").indexOf(V[3])>=0}else{if(U==="not"){var Y=V[3];for(var W=0,T=Y.length;W=0)}}},ID:function(U,T){return 
U.nodeType===1&&U.getAttribute("id")===T},TAG:function(U,T){return(T==="*"&&U.nodeType===1)||U.nodeName===T},CLASS:function(U,T){return(" "+(U.className||U.getAttribute("class"))+" ").indexOf(T)>-1},ATTR:function(Y,W){var V=W[1],T=I.attrHandle[V]?I.attrHandle[V](Y):Y[V]!=null?Y[V]:Y.getAttribute(V),Z=T+"",X=W[2],U=W[4];return T==null?X==="!=":X==="="?Z===U:X==="*="?Z.indexOf(U)>=0:X==="~="?(" "+Z+" ").indexOf(U)>=0:!U?Z&&T!==false:X==="!="?Z!=U:X==="^="?Z.indexOf(U)===0:X==="$="?Z.substr(Z.length-U.length)===U:X==="|="?Z===U||Z.substr(0,U.length+1)===U+"-":false},POS:function(X,U,V,Y){var T=U[2],W=I.setFilters[T];if(W){return W(X,V,U,Y)}}}};var M=I.match.POS;for(var O in I.match){I.match[O]=RegExp(I.match[O].source+/(?![^\[]*\])(?![^\(]*\))/.source)}var E=function(U,T){U=Array.prototype.slice.call(U);if(T){T.push.apply(T,U);return T}return U};try{Array.prototype.slice.call(document.documentElement.childNodes)}catch(N){E=function(X,W){var U=W||[];if(H.call(X)==="[object Array]"){Array.prototype.push.apply(U,X)}else{if(typeof X.length==="number"){for(var V=0,T=X.length;V ";var T=document.documentElement;T.insertBefore(U,T.firstChild);if(!!document.getElementById(V)){I.find.ID=function(X,Y,Z){if(typeof Y.getElementById!=="undefined"&&!Z){var W=Y.getElementById(X[1]);return W?W.id===X[1]||typeof W.getAttributeNode!=="undefined"&&W.getAttributeNode("id").nodeValue===X[1]?[W]:g:[]}};I.filter.ID=function(Y,W){var X=typeof Y.getAttributeNode!=="undefined"&&Y.getAttributeNode("id");return Y.nodeType===1&&X&&X.nodeValue===W}}T.removeChild(U)})();(function(){var T=document.createElement("div");T.appendChild(document.createComment(""));if(T.getElementsByTagName("*").length>0){I.find.TAG=function(U,Y){var X=Y.getElementsByTagName(U[1]);if(U[1]==="*"){var W=[];for(var V=0;X[V];V++){if(X[V].nodeType===1){W.push(X[V])}}X=W}return X}}T.innerHTML=" ";if(T.firstChild&&typeof 
T.firstChild.getAttribute!=="undefined"&&T.firstChild.getAttribute("href")!=="#"){I.attrHandle.href=function(U){return U.getAttribute("href",2)}}})();if(document.querySelectorAll){(function(){var T=F,U=document.createElement("div");U.innerHTML="
";if(U.querySelectorAll&&U.querySelectorAll(".TEST").length===0){return}F=function(Y,X,V,W){X=X||document;if(!W&&X.nodeType===9&&!Q(X)){try{return E(X.querySelectorAll(Y),V)}catch(Z){}}return T(Y,X,V,W)};F.find=T.find;F.filter=T.filter;F.selectors=T.selectors;F.matches=T.matches})()}if(document.getElementsByClassName&&document.documentElement.getElementsByClassName){(function(){var T=document.createElement("div");T.innerHTML="
";if(T.getElementsByClassName("e").length===0){return}T.lastChild.className="e";if(T.getElementsByClassName("e").length===1){return}I.order.splice(1,0,"CLASS");I.find.CLASS=function(U,V,W){if(typeof V.getElementsByClassName!=="undefined"&&!W){return V.getElementsByClassName(U[1])}}})()}function P(U,Z,Y,ad,aa,ac){var ab=U=="previousSibling"&&!ac;for(var W=0,V=ad.length;W0){X=T;break}}}T=T[U]}ad[W]=X}}}var K=document.compareDocumentPosition?function(U,T){return U.compareDocumentPosition(T)&16}:function(U,T){return U!==T&&(U.contains?U.contains(T):true)};var Q=function(T){return T.nodeType===9&&T.documentElement.nodeName!=="HTML"||!!T.ownerDocument&&Q(T.ownerDocument)};var J=function(T,aa){var W=[],X="",Y,V=aa.nodeType?[aa]:aa;while((Y=I.match.PSEUDO.exec(T))){X+=Y[0];T=T.replace(I.match.PSEUDO,"")}T=I.relative[T]?T+"*":T;for(var Z=0,U=V.length;Z0||T.offsetHeight>0};F.selectors.filters.animated=function(T){return o.grep(o.timers,function(U){return T===U.elem}).length};o.multiFilter=function(V,T,U){if(U){V=":not("+V+")"}return F.matches(V,T)};o.dir=function(V,U){var T=[],W=V[U];while(W&&W!=document){if(W.nodeType==1){T.push(W)}W=W[U]}return T};o.nth=function(X,T,V,W){T=T||1;var U=0;for(;X;X=X[V]){if(X.nodeType==1&&++U==T){break}}return X};o.sibling=function(V,U){var T=[];for(;V;V=V.nextSibling){if(V.nodeType==1&&V!=U){T.push(V)}}return T};return;l.Sizzle=F})();o.event={add:function(I,F,H,K){if(I.nodeType==3||I.nodeType==8){return}if(I.setInterval&&I!=l){I=l}if(!H.guid){H.guid=this.guid++}if(K!==g){var G=H;H=this.proxy(G);H.data=K}var E=o.data(I,"events")||o.data(I,"events",{}),J=o.data(I,"handle")||o.data(I,"handle",function(){return typeof o!=="undefined"&&!o.event.triggered?o.event.handle.apply(arguments.callee.elem,arguments):g});J.elem=I;o.each(F.split(/\s+/),function(M,N){var O=N.split(".");N=O.shift();H.type=O.slice().sort().join(".");var 
L=E[N];if(o.event.specialAll[N]){o.event.specialAll[N].setup.call(I,K,O)}if(!L){L=E[N]={};if(!o.event.special[N]||o.event.special[N].setup.call(I,K,O)===false){if(I.addEventListener){I.addEventListener(N,J,false)}else{if(I.attachEvent){I.attachEvent("on"+N,J)}}}}L[H.guid]=H;o.event.global[N]=true});I=null},guid:1,global:{},remove:function(K,H,J){if(K.nodeType==3||K.nodeType==8){return}var G=o.data(K,"events"),F,E;if(G){if(H===g||(typeof H==="string"&&H.charAt(0)==".")){for(var I in G){this.remove(K,I+(H||""))}}else{if(H.type){J=H.handler;H=H.type}o.each(H.split(/\s+/),function(M,O){var Q=O.split(".");O=Q.shift();var N=RegExp("(^|\\.)"+Q.slice().sort().join(".*\\.")+"(\\.|$)");if(G[O]){if(J){delete G[O][J.guid]}else{for(var P in G[O]){if(N.test(G[O][P].type)){delete G[O][P]}}}if(o.event.specialAll[O]){o.event.specialAll[O].teardown.call(K,Q)}for(F in G[O]){break}if(!F){if(!o.event.special[O]||o.event.special[O].teardown.call(K,Q)===false){if(K.removeEventListener){K.removeEventListener(O,o.data(K,"handle"),false)}else{if(K.detachEvent){K.detachEvent("on"+O,o.data(K,"handle"))}}}F=null;delete G[O]}}})}for(F in G){break}if(!F){var L=o.data(K,"handle");if(L){L.elem=null}o.removeData(K,"events");o.removeData(K,"handle")}}},trigger:function(I,K,H,E){var G=I.type||I;if(!E){I=typeof I==="object"?I[h]?I:o.extend(o.Event(G),I):o.Event(G);if(G.indexOf("!")>=0){I.type=G=G.slice(0,-1);I.exclusive=true}if(!H){I.stopPropagation();if(this.global[G]){o.each(o.cache,function(){if(this.events&&this.events[G]){o.event.trigger(I,K,this.handle.elem)}})}}if(!H||H.nodeType==3||H.nodeType==8){return g}I.result=g;I.target=H;K=o.makeArray(K);K.unshift(I)}I.currentTarget=H;var J=o.data(H,"handle");if(J){J.apply(H,K)}if((!H[G]||(o.nodeName(H,"a")&&G=="click"))&&H["on"+G]&&H["on"+G].apply(H,K)===false){I.result=false}if(!E&&H[G]&&!I.isDefaultPrevented()&&!(o.nodeName(H,"a")&&G=="click")){this.triggered=true;try{H[G]()}catch(L){}}this.triggered=false;if(!I.isPropagationStopped()){var 
F=H.parentNode||H.ownerDocument;if(F){o.event.trigger(I,K,F,true)}}},handle:function(K){var J,E;K=arguments[0]=o.event.fix(K||l.event);K.currentTarget=this;var L=K.type.split(".");K.type=L.shift();J=!L.length&&!K.exclusive;var I=RegExp("(^|\\.)"+L.slice().sort().join(".*\\.")+"(\\.|$)");E=(o.data(this,"events")||{})[K.type];for(var G in E){var H=E[G];if(J||I.test(H.type)){K.handler=H;K.data=H.data;var F=H.apply(this,arguments);if(F!==g){K.result=F;if(F===false){K.preventDefault();K.stopPropagation()}}if(K.isImmediatePropagationStopped()){break}}}},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode metaKey newValue originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),fix:function(H){if(H[h]){return H}var F=H;H=o.Event(F);for(var G=this.props.length,J;G;){J=this.props[--G];H[J]=F[J]}if(!H.target){H.target=H.srcElement||document}if(H.target.nodeType==3){H.target=H.target.parentNode}if(!H.relatedTarget&&H.fromElement){H.relatedTarget=H.fromElement==H.target?H.toElement:H.fromElement}if(H.pageX==null&&H.clientX!=null){var I=document.documentElement,E=document.body;H.pageX=H.clientX+(I&&I.scrollLeft||E&&E.scrollLeft||0)-(I.clientLeft||0);H.pageY=H.clientY+(I&&I.scrollTop||E&&E.scrollTop||0)-(I.clientTop||0)}if(!H.which&&((H.charCode||H.charCode===0)?H.charCode:H.keyCode)){H.which=H.charCode||H.keyCode}if(!H.metaKey&&H.ctrlKey){H.metaKey=H.ctrlKey}if(!H.which&&H.button){H.which=(H.button&1?1:(H.button&2?3:(H.button&4?2:0)))}return H},proxy:function(F,E){E=E||function(){return F.apply(this,arguments)};E.guid=F.guid=F.guid||E.guid||this.guid++;return E},special:{ready:{setup:B,teardown:function(){}}},specialAll:{live:{setup:function(E,F){o.event.add(this,F[0],c)},teardown:function(G){if(G.length){var 
E=0,F=RegExp("(^|\\.)"+G[0]+"(\\.|$)");o.each((o.data(this,"events").live||{}),function(){if(F.test(this.type)){E++}});if(E<1){o.event.remove(this,G[0],c)}}}}}};o.Event=function(E){if(!this.preventDefault){return new o.Event(E)}if(E&&E.type){this.originalEvent=E;this.type=E.type}else{this.type=E}this.timeStamp=e();this[h]=true};function k(){return false}function u(){return true}o.Event.prototype={preventDefault:function(){this.isDefaultPrevented=u;var E=this.originalEvent;if(!E){return}if(E.preventDefault){E.preventDefault()}E.returnValue=false},stopPropagation:function(){this.isPropagationStopped=u;var E=this.originalEvent;if(!E){return}if(E.stopPropagation){E.stopPropagation()}E.cancelBubble=true},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=u;this.stopPropagation()},isDefaultPrevented:k,isPropagationStopped:k,isImmediatePropagationStopped:k};var a=function(F){var E=F.relatedTarget;while(E&&E!=this){try{E=E.parentNode}catch(G){E=this}}if(E!=this){F.type=F.data;o.event.handle.apply(this,arguments)}};o.each({mouseover:"mouseenter",mouseout:"mouseleave"},function(F,E){o.event.special[E]={setup:function(){o.event.add(this,F,a,E)},teardown:function(){o.event.remove(this,F,a)}}});o.fn.extend({bind:function(F,G,E){return F=="unload"?this.one(F,G,E):this.each(function(){o.event.add(this,F,E||G,E&&G)})},one:function(G,H,F){var E=o.event.proxy(F||H,function(I){o(this).unbind(I,E);return(F||H).apply(this,arguments)});return this.each(function(){o.event.add(this,G,E,F&&H)})},unbind:function(F,E){return this.each(function(){o.event.remove(this,F,E)})},trigger:function(E,F){return this.each(function(){o.event.trigger(E,F,this)})},triggerHandler:function(E,G){if(this[0]){var F=o.Event(E);F.preventDefault();F.stopPropagation();o.event.trigger(F,G,this[0]);return F.result}},toggle:function(G){var E=arguments,F=1;while(Fa text ';var 
H=K.getElementsByTagName("*"),E=K.getElementsByTagName("a")[0];if(!H||!H.length||!E){return}o.support={leadingWhitespace:K.firstChild.nodeType==3,tbody:!K.getElementsByTagName("tbody").length,objectAll:!!K.getElementsByTagName("object")[0].getElementsByTagName("*").length,htmlSerialize:!!K.getElementsByTagName("link").length,style:/red/.test(E.getAttribute("style")),hrefNormalized:E.getAttribute("href")==="/a",opacity:E.style.opacity==="0.5",cssFloat:!!E.style.cssFloat,scriptEval:false,noCloneEvent:true,boxModel:null};G.type="text/javascript";try{G.appendChild(document.createTextNode("window."+J+"=1;"))}catch(I){}F.insertBefore(G,F.firstChild);if(l[J]){o.support.scriptEval=true;delete l[J]}F.removeChild(G);if(K.attachEvent&&K.fireEvent){K.attachEvent("onclick",function(){o.support.noCloneEvent=false;K.detachEvent("onclick",arguments.callee)});K.cloneNode(true).fireEvent("onclick")}o(function(){var L=document.createElement("div");L.style.width=L.style.paddingLeft="1px";document.body.appendChild(L);o.boxModel=o.support.boxModel=L.offsetWidth===2;document.body.removeChild(L).style.display="none"})})();var w=o.support.cssFloat?"cssFloat":"styleFloat";o.props={"for":"htmlFor","class":"className","float":w,cssFloat:w,styleFloat:w,readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",tabindex:"tabIndex"};o.fn.extend({_load:o.fn.load,load:function(G,J,K){if(typeof G!=="string"){return this._load(G)}var I=G.indexOf(" ");if(I>=0){var E=G.slice(I,G.length);G=G.slice(0,I)}var H="GET";if(J){if(o.isFunction(J)){K=J;J=null}else{if(typeof J==="object"){J=o.param(J);H="POST"}}}var F=this;o.ajax({url:G,type:H,dataType:"html",data:J,complete:function(M,L){if(L=="success"||L=="notmodified"){F.html(E?o("
").append(M.responseText.replace(/
-Failed Jobs
-retry all delete all
-Showing {{start}} to {{end}} of {{size}} jobs
-
-
- {{#failed_jobs}}
-
-
- Worker
-
- {{worker}} on {{queue}} at
- {{failed_at}}
-
- Class
- {{payload_class}}
- Arguments
- {{payload_args}}
- Exception
- {{exception}}
- Error
-
- {{error}}
- {{traceback}}
-
-
- Payload Actions
-
-
-
-
-
-
-
-
-
- {{/failed_jobs}}
-
-
-{{>footer}}
diff --git a/resweb/templates/footer.mustache b/resweb/templates/footer.mustache
deleted file mode 100644
index 8d1cc11..0000000
--- a/resweb/templates/footer.mustache
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-