From 0443bb29c41a9aaee8c72e507b92425bd0731f60 Mon Sep 17 00:00:00 2001
From: Alman One
Date: Tue, 6 Aug 2013 14:48:42 +0200
Subject: [PATCH 0001/1103] Initial riak support
---
celery/backends/__init__.py | 1 +
celery/backends/riak.py | 139 +++++++++++++++++++++++
celery/tests/backends/test_riak.py | 174 +++++++++++++++++++++++++++++
requirements/test-ci.txt | 1 +
4 files changed, 315 insertions(+)
create mode 100644 celery/backends/riak.py
create mode 100644 celery/tests/backends/test_riak.py
diff --git a/celery/backends/__init__.py b/celery/backends/__init__.py
index 421f7f480..4c5d87c80 100644
--- a/celery/backends/__init__.py
+++ b/celery/backends/__init__.py
@@ -34,6 +34,7 @@
'database': 'celery.backends.database:DatabaseBackend',
'cassandra': 'celery.backends.cassandra:CassandraBackend',
'couchbase': 'celery.backends.couchbase:CouchBaseBackend',
+ 'riak': 'celery.backends.riak:RiakBackend',
'disabled': 'celery.backends.base:DisabledBackend',
}
diff --git a/celery/backends/riak.py b/celery/backends/riak.py
new file mode 100644
index 000000000..67cff59b7
--- /dev/null
+++ b/celery/backends/riak.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+"""
+ celery.backends.riak
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Riak result store backend.
+
+"""
+from __future__ import absolute_import, print_function
+
+from datetime import datetime
+
+try:
+ import riak
+ from riak import RiakClient, RiakNode
+ from riak.resolver import last_written_resolver
+except ImportError: # pragma: no cover
+ riak = None # noqa
+
+from kombu.utils.url import _parse_url
+
+from celery import states
+from celery.exceptions import ImproperlyConfigured
+from celery.utils.timeutils import maybe_timedelta
+
+from .base import KeyValueStoreBackend
+
+
+class NonAsciiBucket(Exception):
+    """ Bucket must be ASCII characters only. """
+
+
+class Validators(object):
+
+ @classmethod
+ def validate_riak_bucket_name(cls, bucket_name):
+ try:
+ bucket_name.decode('ascii')
+ except UnicodeDecodeError as ude:
+ return False
+ return True
+
+
+class RiakBackend(KeyValueStoreBackend):
+ # use protobuf by default?
+ bucket_name = "default"
+ host = 'localhost'
+ port = 8087
+
+ # supports_autoexpire = False
+
+ def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
+ url=None, *args, **kwargs):
+ """Initialize Riak backend instance.
+
+ :raises celery.exceptions.ImproperlyConfigured: if
+ module :mod:`riak` is not available.
+ """
+ super(RiakBackend, self).__init__(*args, **kwargs)
+
+ self.expires = kwargs.get('expires') or maybe_timedelta(
+ self.app.conf.CELERY_TASK_RESULT_EXPIRES)
+
+ if not riak:
+ raise ImproperlyConfigured(
+ 'You need to install the riak library to use the '
+ 'Riak backend.')
+
+ uhost = uport = uname = upass = ubucket = None
+ if url:
+ _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
+ if ubucket:
+ ubucket = ubucket.strip('/')
+
+ config = self.app.conf.get('CELERY_RIAK_BACKEND_SETTINGS', None)
+ if config is not None:
+ if not isinstance(config, dict):
+ raise ImproperlyConfigured(
+ 'Riak backend settings should be grouped in a dict')
+ else:
+ config = {}
+
+ self.host = uhost or config.get('host', self.host)
+ self.port = int(uport or config.get('port', self.port))
+ self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
+
+ # riak bucket must be ascii letters or numbers only
+ if not Validators.validate_riak_bucket_name(self.bucket_name):
+ raise NonAsciiBucket("Riak bucket names must be ASCII characters")
+
+ self._client = None
+
+ def _get_client(self):
+ """Get client connection"""
+ if self._client is None or not self._client.is_alive():
+ kwargs = {
+ 'host': self.host,
+ 'port': self.port
+ }
+ if self.port:
+ kwargs.update({'port': self.port})
+ logging.debug("riak settings %s" % kwargs)
+ self._client = RiakClient(protocol='pbc',
+ host=kwargs.get('host'),
+ pb_port=kwargs.get('port'))
+ self._client.resolver = last_written_resolver
+ return self._client
+
+ def _get_bucket(self):
+ """Connect to our bucket"""
+ if (
+ self._client is None or not self._client.is_alive()
+ or not self._bucket
+ ):
+ self._bucket = self.client.bucket(self.bucket_name)
+ return self._bucket
+
+ @property
+ def client(self):
+ return self._get_client()
+
+ @property
+ def bucket(self):
+ return self._get_bucket()
+
+ def get(self, key):
+ return self.bucket.get(key).data
+
+ def set(self, key, value):
+ # RiakBucket.new(key=None, data=None, content_type='application/json',
+ # encoded_data=None)
+ _key = self.bucket.new(key, data=value)
+ _key.store()
+
+ def mget(self, keys):
+ return [self.get(key).data for key in keys]
+
+ def delete(self, key):
+ self.bucket.delete(key)
diff --git a/celery/tests/backends/test_riak.py b/celery/tests/backends/test_riak.py
new file mode 100644
index 000000000..fd2a3728f
--- /dev/null
+++ b/celery/tests/backends/test_riak.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import, with_statement
+
+from mock import MagicMock, Mock, patch, sentinel
+from nose import SkipTest
+
+from celery import Celery
+from celery.backends import riak as module
+from celery.backends.riak import RiakBackend, riak, NonAsciiBucket
+from celery.exceptions import ImproperlyConfigured
+from celery.tests.utils import AppCase
+
+
+RIAK_BUCKET = 'riak_bucket'
+
+
+class test_RiakBackend(AppCase):
+
+ def setUp(self):
+ if riak is None:
+ raise SkipTest('riak is not installed.')
+ from celery.app import current_app
+ self.app = self._current_app = current_app()
+ self.backend = RiakBackend(app=self.app)
+
+ def test_init_no_riak(self):
+ """
+ test init no riak raises
+ """
+ prev, module.riak = module.riak, None
+ try:
+ with self.assertRaises(ImproperlyConfigured):
+ RiakBackend(app=self.app)
+ finally:
+ module.riak = prev
+
+ def test_init_no_settings(self):
+ """
+ test init no settings
+ """
+ celery = Celery(set_as_current=False)
+ celery.conf.CELERY_RIAK_BACKEND_SETTINGS = []
+ with self.assertRaises(ImproperlyConfigured):
+ RiakBackend(app=celery)
+
+ def test_init_settings_is_None(self):
+ """
+ Test init settings is None
+ """
+ celery = Celery(set_as_current=False)
+ celery.conf.CELERY_RIAK_BACKEND_SETTINGS = None
+ RiakBackend(app=celery)
+
+ def test_get_client_client_exists(self):
+ """
+ Test get existing client
+ """
+ with patch('riak.client.RiakClient') as mock_connection:
+ self.backend._client = sentinel._client
+
+ mocked_is_alive = self.backend._client.is_alive = Mock()
+ mocked_is_alive.return_value.value = True
+ client = self.backend._get_client()
+ self.assertEquals(sentinel._client, client)
+ self.assertFalse(mock_connection.called)
+
+ def test_get(self):
+ """
+ Test get
+ RiakBackend.get
+ should return and take two params
+ db conn to riak is mocked
+ TODO Should test on key not exists
+ """
+ celery = Celery(set_as_current=False)
+
+ celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
+
+ backend = RiakBackend(app=celery)
+ backend._client = Mock()
+ backend._bucket = Mock()
+ mocked_get = backend._bucket.get = Mock()
+ mocked_get.return_value.data = sentinel.retval
+ # should return None
+ self.assertEqual(backend.get('1f3fab'), sentinel.retval)
+ backend._bucket.get.assert_called_once_with('1f3fab')
+
+ def test_set(self):
+ """
+ Test set
+ RiakBackend.set
+ should return None and take two params
+        db conn to riak is mocked
+ """
+ celery = Celery(set_as_current=False)
+ celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
+ backend = RiakBackend(app=celery)
+ backend._client = MagicMock()
+ backend._bucket = MagicMock()
+ backend._bucket.set = MagicMock()
+ # should return None
+ self.assertIsNone(backend.set(sentinel.key, sentinel.value))
+
+ def test_delete(self):
+ """
+        Test delete
+        RiakBackend.delete
+        should return None and take one param
+        db conn to riak is mocked
+ TODO Should test on key not exists
+ """
+ celery = Celery(set_as_current=False)
+
+ celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
+
+ backend = RiakBackend(app=celery)
+ backend._client = Mock()
+ backend._bucket = Mock()
+ mocked_delete = backend._client.delete = Mock()
+ mocked_delete.return_value = None
+ # should return None
+ self.assertIsNone(backend.delete('1f3fab'))
+ backend._bucket.delete.assert_called_once_with('1f3fab')
+
+ def test_config_params(self):
+ """
+ test celery.conf.CELERY_RIAK_BACKEND_SETTINGS
+ celery.conf.CELERY_RIAK_BACKEND_SETTINGS
+ is properly set
+ """
+ celery = Celery(set_as_current=False)
+ celery.conf.CELERY_RIAK_BACKEND_SETTINGS = {'bucket': 'mycoolbucket',
+ 'host': 'there.host.com',
+ 'port': '1234'}
+ backend = RiakBackend(app=celery)
+ self.assertEqual(backend.bucket_name, "mycoolbucket")
+ self.assertEqual(backend.host, 'there.host.com')
+ self.assertEqual(backend.port, 1234)
+
+ def test_backend_by_url(self, url='riak://myhost/mycoolbucket'):
+ """
+ test get backend by url
+ """
+ from celery.backends.riak import RiakBackend
+ backend, url_ = backends.get_backend_by_url(url)
+ self.assertIs(backend, RiakBackend)
+ self.assertEqual(url_, url)
+
+ def test_backend_params_by_url(self):
+ """
+ test get backend params by url
+ """
+ celery = Celery(set_as_current=False,
+ backend='riak://myhost:123/mycoolbucket')
+ backend = celery.backend
+ self.assertEqual(backend.bucket_name, "mycoolbucket")
+ self.assertEqual(backend.host, "myhost")
+ self.assertEqual(backend.port, 123)
+
+ def test_non_ASCII_bucket_raises(self):
+ """
+        Test that a non-ASCII bucket name in
+        celery.conf.CELERY_RIAK_BACKEND_SETTINGS
+        raises NonAsciiBucket
+ """
+ with self.assertRaises(NonAsciiBucket):
+ celery = Celery(set_as_current=False)
+ celery.conf.CELERY_RIAK_BACKEND_SETTINGS = {
+ 'bucket': 'héhé',
+ 'host': 'there.host.com',
+ 'port': '1234',
+ }
+ RiakBackend(app=celery)
diff --git a/requirements/test-ci.txt b/requirements/test-ci.txt
index a220100d7..275494e1a 100644
--- a/requirements/test-ci.txt
+++ b/requirements/test-ci.txt
@@ -1,5 +1,6 @@
coverage>=3.0
redis
+#riak >=2.0
#pymongo
#SQLAlchemy
#PyOpenSSL
From 6511e70585ad2b3440eafc9af17906457a48a6b9 Mon Sep 17 00:00:00 2001
From: Gilles Dartiguelongue
Date: Mon, 14 Oct 2013 11:15:50 +0200
Subject: [PATCH 0002/1103] Add some documentation on class attributes
Also add handling of protocol.
---
celery/backends/riak.py | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/celery/backends/riak.py b/celery/backends/riak.py
index 67cff59b7..4beb9c550 100644
--- a/celery/backends/riak.py
+++ b/celery/backends/riak.py
@@ -42,9 +42,17 @@ def validate_riak_bucket_name(cls, bucket_name):
class RiakBackend(KeyValueStoreBackend):
- # use protobuf by default?
+ # TODO: allow using other protocols than protobuf ?
+ #: default protocol used to connect to Riak, might be `http` or `pbc`
+ protocol = 'pbc'
+
+ #: default Riak bucket name (`default`)
bucket_name = "default"
+
+ #: default Riak server hostname (`localhost`)
host = 'localhost'
+
+ #: default Riak server port (8087)
port = 8087
# supports_autoexpire = False
@@ -68,7 +76,7 @@ def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
uhost = uport = uname = upass = ubucket = None
if url:
- _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
+ uprot, uhost, uport, uname, upass, ubucket, _ = _parse_url(url)
if ubucket:
ubucket = ubucket.strip('/')
@@ -83,6 +91,7 @@ def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
self.host = uhost or config.get('host', self.host)
self.port = int(uport or config.get('port', self.port))
self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
+ self.protocol = uprot or config.get('protocol', self.protocol)
# riak bucket must be ascii letters or numbers only
if not Validators.validate_riak_bucket_name(self.bucket_name):
@@ -100,7 +109,7 @@ def _get_client(self):
if self.port:
kwargs.update({'port': self.port})
logging.debug("riak settings %s" % kwargs)
- self._client = RiakClient(protocol='pbc',
+ self._client = RiakClient(protocol=self.protocol,
host=kwargs.get('host'),
pb_port=kwargs.get('port'))
self._client.resolver = last_written_resolver
From 844d9a6ef498ec4031460654398e16485085beca Mon Sep 17 00:00:00 2001
From: Gilles Dartiguelongue
Date: Mon, 14 Oct 2013 14:23:07 +0200
Subject: [PATCH 0003/1103] Cleanup code related to connection to Riak
This is a leftover of previous work on this backend.
---
celery/backends/riak.py | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/celery/backends/riak.py b/celery/backends/riak.py
index 4beb9c550..b1c1d40af 100644
--- a/celery/backends/riak.py
+++ b/celery/backends/riak.py
@@ -102,16 +102,9 @@ def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
def _get_client(self):
"""Get client connection"""
if self._client is None or not self._client.is_alive():
- kwargs = {
- 'host': self.host,
- 'port': self.port
- }
- if self.port:
- kwargs.update({'port': self.port})
- logging.debug("riak settings %s" % kwargs)
self._client = RiakClient(protocol=self.protocol,
- host=kwargs.get('host'),
- pb_port=kwargs.get('port'))
+ host=self.host,
+ pb_port=self.port)
self._client.resolver = last_written_resolver
return self._client
From c4a372a15bd16cf4ca39751429ab9b1ba470f164 Mon Sep 17 00:00:00 2001
From: Gilles Dartiguelongue
Date: Mon, 14 Oct 2013 15:06:10 +0200
Subject: [PATCH 0004/1103] Switch default bucket name to celery
---
celery/backends/riak.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/backends/riak.py b/celery/backends/riak.py
index b1c1d40af..ebdcca813 100644
--- a/celery/backends/riak.py
+++ b/celery/backends/riak.py
@@ -47,7 +47,7 @@ class RiakBackend(KeyValueStoreBackend):
protocol = 'pbc'
#: default Riak bucket name (`default`)
- bucket_name = "default"
+ bucket_name = "celery"
#: default Riak server hostname (`localhost`)
host = 'localhost'
From 4859492883757340eb16ad9858d57263a1468281 Mon Sep 17 00:00:00 2001
From: Gilles Dartiguelongue
Date: Mon, 14 Oct 2013 15:36:38 +0200
Subject: [PATCH 0005/1103] Add riak to extra requirements
---
requirements/extras/riak.txt | 1 +
setup.py | 1 +
2 files changed, 2 insertions(+)
create mode 100644 requirements/extras/riak.txt
diff --git a/requirements/extras/riak.txt b/requirements/extras/riak.txt
new file mode 100644
index 000000000..b6bfed133
--- /dev/null
+++ b/requirements/extras/riak.txt
@@ -0,0 +1 @@
+riak >=2.0
diff --git a/setup.py b/setup.py
index a787d647d..d5dac4c07 100644
--- a/setup.py
+++ b/setup.py
@@ -199,6 +199,7 @@ def reqs(*f):
'mongodb': extras('mongodb.txt'),
'sqs': extras('sqs.txt'),
'couchdb': extras('couchdb.txt'),
+ 'riak': extras('riak.txt'),
'beanstalk': extras('beanstalk.txt'),
'zookeeper': extras('zookeeper.txt'),
'zeromq': extras('zeromq.txt'),
From 2b79d6d5d69e22234e119e77d25020c6e1c466d4 Mon Sep 17 00:00:00 2001
From: Gilles Dartiguelongue
Date: Mon, 14 Oct 2013 15:06:27 +0200
Subject: [PATCH 0006/1103] Add Riak backend documentation
---
docs/configuration.rst | 65 ++++++++++++++++++++++++++++++++++
docs/includes/installation.txt | 3 ++
2 files changed, 68 insertions(+)
diff --git a/docs/configuration.rst b/docs/configuration.rst
index ac113dbde..a44b06903 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -628,6 +628,71 @@ Example configuration
'max_retries': 10
}
+.. _conf-riak-result-backend:
+
+Riak backend settings
+---------------------
+
+.. note::
+
+ The Riak backend requires the :mod:`riak` library:
+ http://pypi.python.org/pypi/riak/
+
+ To install the riak package use `pip` or `easy_install`:
+
+ .. code-block:: bash
+
+ $ pip install riak
+
+This backend requires the :setting:`CELERY_RESULT_BACKEND`
+setting to be set to a Riak URL::
+
+ CELERY_RESULT_BACKEND = "riak://host:port/bucket"
+
+For example::
+
+    CELERY_RESULT_BACKEND = "riak://localhost/celery"
+
+which is the same as::
+
+ CELERY_RESULT_BACKEND = "riak://"
+
+The fields of the URL are defined as follows:
+
+- *host*
+
+Host name or IP address of the Riak server. e.g. `"localhost"`.
+
+- *port*
+
+Port to the Riak server using the protobuf protocol. Default is 8087.
+
+- *bucket*
+
+Bucket name to use. Default is `celery`.
+The bucket needs to be a string with ASCII characters only.
+
+Alternatively, this backend can be configured with the following configuration directives.
+
+.. setting:: CELERY_RIAK_BACKEND_SETTINGS
+
+CELERY_RIAK_BACKEND_SETTINGS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is a dict supporting the following keys:
+
+* host
+ The host name of the Riak server. Defaults to "localhost".
+
+* port
+ The port the Riak server is listening to. Defaults to 8087.
+
+* bucket
+ The bucket name to connect to. Defaults to "celery".
+
+* protocol
+ The protocol to use to connect to the Riak server. This is not configurable
+   via :setting:`CELERY_RESULT_BACKEND`.
.. _conf-ironcache-result-backend:
diff --git a/docs/includes/installation.txt b/docs/includes/installation.txt
index fda618d08..e568ebc52 100644
--- a/docs/includes/installation.txt
+++ b/docs/includes/installation.txt
@@ -85,6 +85,9 @@ Transports and Backends
:celery[couchbase]:
for using CouchBase as a result backend.
+:celery[riak]:
+ for using Riak as a result backend.
+
:celery[beanstalk]:
for using Beanstalk as a message transport.
From 1ac10f3b8f2bc0a21b7e418ee6c967df614bd106 Mon Sep 17 00:00:00 2001
From: NoKriK
Date: Tue, 15 Oct 2013 11:42:26 +0200
Subject: [PATCH 0007/1103] Fixed error in protocol handling for the riak
backend
---
celery/backends/riak.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/backends/riak.py b/celery/backends/riak.py
index ebdcca813..725e396a5 100644
--- a/celery/backends/riak.py
+++ b/celery/backends/riak.py
@@ -91,7 +91,7 @@ def __init__(self, host=None, port=None, bucket_name=None, protocol=None,
self.host = uhost or config.get('host', self.host)
self.port = int(uport or config.get('port', self.port))
self.bucket_name = ubucket or config.get('bucket', self.bucket_name)
- self.protocol = uprot or config.get('protocol', self.protocol)
+ self.protocol = protocol or config.get('protocol', self.protocol)
# riak bucket must be ascii letters or numbers only
if not Validators.validate_riak_bucket_name(self.bucket_name):
From 7ad7a57fa12da3cde49c0f043c40b2bc5789c459 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 22 Nov 2013 17:06:43 +0000
Subject: [PATCH 0008/1103] Implements steeves dynamic tasks as task.replace.
Issue #817
---
celery/app/task.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/celery/app/task.py b/celery/app/task.py
index ee96c5c6b..d92e1a2e1 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -661,6 +661,17 @@ def retry(self, args=None, kwargs=None, exc=None, throw=True,
raise ret
return ret
+ def replace(self, sig):
+ request = self.request
+ sig.set_immutable(True)
+ chord_id, request.chord = request.chord, None
+ group_id, request.group = request.group, None
+ callbacks, request.callbacks = request.callbacks, [sig]
+ if group_id or chord_id:
+ sig.set(group=group_id, chord=chord_id)
+ sig |= callbacks[0]
+ return sig
+
def apply(self, args=None, kwargs=None,
link=None, link_error=None, **options):
"""Execute this task locally, by blocking until the task returns.
From dc4634298f557a2f898a9bc4772fffb79c45eede Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 23 Jan 2014 15:28:25 +0000
Subject: [PATCH 0009/1103] Use a heapq to avoid calling all entries for every
iteration
---
celery/beat.py | 73 +++++++++++++++++++++++++++++++-------------------
1 file changed, 45 insertions(+), 28 deletions(-)
diff --git a/celery/beat.py b/celery/beat.py
index f93e3bbf9..8473017d7 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -9,12 +9,14 @@
from __future__ import absolute_import
import errno
+import heapq
import os
import time
import shelve
import sys
import traceback
+from collections import namedtuple
from threading import Event, Thread
from billiard import Process, ensure_multiprocessing
@@ -34,6 +36,8 @@
__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler',
'PersistentScheduler', 'Service', 'EmbeddedService']
+event_t = namedtuple('event_t', ('time', 'priority', 'entry'))
+
logger = get_logger(__name__)
debug, info, error, warning = (logger.debug, logger.info,
logger.error, logger.warning)
@@ -173,6 +177,7 @@ def __init__(self, app, schedule=None, max_interval=None,
or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
or self.max_interval)
self.Publisher = Publisher or app.amqp.TaskProducer
+ self._heap = None
if not lazy:
self.setup_schedule()
@@ -191,32 +196,45 @@ def maybe_due(self, entry, publisher=None):
is_due, next_time_to_run = entry.is_due()
if is_due:
- info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
- try:
- result = self.apply_async(entry, publisher=publisher)
- except Exception as exc:
- error('Message Error: %s\n%s',
- exc, traceback.format_stack(), exc_info=True)
- else:
- debug('%s sent. id->%s', entry.task, result.id)
+ self.apply_entry(entry, producer=publisher, advance=True)
return next_time_to_run
- def tick(self):
+ def apply_entry(self, entry, producer=None):
+ info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
+ try:
+ result = self.apply_async(entry, producer=producer, advance=False)
+ except Exception as exc:
+ error('Message Error: %s\n%s',
+ exc, traceback.format_stack(), exc_info=True)
+ else:
+ debug('%s sent. id->%s', entry.task, result.id)
+
+ def tick(self, event_t=event_t, min=min,
+ heappop=heapq.heappop, heappush=heapq.heappush):
"""Run a tick, that is one iteration of the scheduler.
Executes all due tasks.
"""
- remaining_times = []
- try:
- for entry in values(self.schedule):
- next_time_to_run = self.maybe_due(entry, self.publisher)
- if next_time_to_run:
- remaining_times.append(next_time_to_run)
- except RuntimeError:
- pass
-
- return min(remaining_times + [self.max_interval])
+ H = self._heap
+ if H is None:
+ H = self._heap = [event_t(e.is_due()[1], 5, e)
+ for e in values(self.schedule)]
+ print('HEAP: %r' % (H, ))
+ event = H[0]
+ entry = event[2]
+ is_due, next_time_to_run = entry.is_due()
+ if is_due:
+ verify = heappop(H)
+ if verify is event:
+ next_entry = self.reserve(entry)
+ self.apply_entry(entry, producer=self.publisher)
+ heappush(H, event_t(next_time_to_run, event[1], next_entry))
+ return 0
+ else:
+ heappush(H, verify)
+ return min(verify[0], self.max_interval)
+ return min(next_time_to_run, self.max_interval)
def should_sync(self):
return (not self._last_sync or
@@ -226,22 +244,22 @@ def reserve(self, entry):
new_entry = self.schedule[entry.name] = next(entry)
return new_entry
- def apply_async(self, entry, publisher=None, **kwargs):
+ def apply_async(self, entry, producer=None, advance=True, **kwargs):
# Update timestamps and run counts before we actually execute,
# so we have that done if an exception is raised (doesn't schedule
# forever.)
- entry = self.reserve(entry)
+ entry = self.reserve(entry) if advance else entry
task = self.app.tasks.get(entry.task)
try:
if task:
- result = task.apply_async(entry.args, entry.kwargs,
- publisher=publisher,
- **entry.options)
- else:
- result = self.send_task(entry.task, entry.args, entry.kwargs,
- publisher=publisher,
+ return task.apply_async(entry.args, entry.kwargs,
+ producer=producer,
**entry.options)
+ else:
+ return self.send_task(entry.task, entry.args, entry.kwargs,
+ producer=producer,
+ **entry.options)
except Exception as exc:
reraise(SchedulingError, SchedulingError(
"Couldn't apply scheduled task {0.name}: {exc}".format(
@@ -249,7 +267,6 @@ def apply_async(self, entry, publisher=None, **kwargs):
finally:
if self.should_sync():
self._do_sync()
- return result
def send_task(self, *args, **kwargs):
return self.app.send_task(*args, **kwargs)
From 044c8a462117c27cb27133f5ea484bb804f16a6e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 23 Jan 2014 15:30:55 +0000
Subject: [PATCH 0010/1103] Remove print
---
celery/beat.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/celery/beat.py b/celery/beat.py
index 8473017d7..48aad7fb4 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -220,7 +220,6 @@ def tick(self, event_t=event_t, min=min,
if H is None:
H = self._heap = [event_t(e.is_due()[1], 5, e)
for e in values(self.schedule)]
- print('HEAP: %r' % (H, ))
event = H[0]
entry = event[2]
is_due, next_time_to_run = entry.is_due()
From 95cdbac9683a89fd35429ce43129d191c6165bae Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 16:31:18 +0000
Subject: [PATCH 0011/1103] Tox: Add debug test output
---
tox.ini | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/tox.ini b/tox.ini
index e532ca2d4..d2a0ac4a3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,7 +16,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:3.3]
basepython = python3.3
@@ -24,7 +24,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:2.7]
basepython = python2.7
@@ -32,7 +32,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:2.6]
basepython = python2.6
@@ -40,7 +40,7 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:pypy]
basepython = pypy
@@ -48,4 +48,4 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
From 5254f9a75b3a2c677fd7165de10fd09bbfd48232 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 16:37:32 +0000
Subject: [PATCH 0012/1103] Tox: Set environment variables properly
---
.travis.yml | 10 +++++-----
tox.ini | 15 ++++++++++-----
2 files changed, 15 insertions(+), 10 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index a30e3602f..c8341f045 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,10 +4,10 @@ env:
global:
PYTHONUNBUFFERED=yes
matrix:
- - TOXENV=2.6
- - TOXENV=2.7
- - TOXENV=3.3
- - TOXENV=3.4
+ - TOXENV=2.6
+ - TOXENV=2.7
+ - TOXENV=3.3
+ - TOXENV=3.4
- TOXENV=pypy
before_install:
- |
@@ -39,4 +39,4 @@ notifications:
channels:
- "chat.freenode.net#celery"
on_success: always
- on_failure: always
\ No newline at end of file
+ on_failure: always
diff --git a/tox.ini b/tox.ini
index d2a0ac4a3..d8605c74d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,37 +15,42 @@ basepython = python3.4
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
+setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:3.3]
basepython = python3.3
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
+setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:2.7]
basepython = python2.7
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
+setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:2.6]
basepython = python2.6
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
+setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:pypy]
basepython = pypy
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
+setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- C_DEBUG_TEST=1 nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+ nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
From f333abc09168e5fd2ec9fd30a3ee80cc510e6513 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Sat, 1 Mar 2014 14:00:57 +0200
Subject: [PATCH 0013/1103] Make the database backend retry operations on
ResourceClosedError and StaleDataError too. Make the operations close the
connection if failure occurs (can't retry on broken connection). Fixes #1786.
---
celery/backends/database/__init__.py | 69 ++++++++++++++++------------
1 file changed, 40 insertions(+), 29 deletions(-)
diff --git a/celery/backends/database/__init__.py b/celery/backends/database/__init__.py
index 58109e782..dea6c833a 100644
--- a/celery/backends/database/__init__.py
+++ b/celery/backends/database/__init__.py
@@ -8,18 +8,22 @@
"""
from __future__ import absolute_import
+import logging
+from contextlib import contextmanager
from functools import wraps
from celery import states
+from celery.backends.base import BaseBackend
from celery.exceptions import ImproperlyConfigured
from celery.five import range
from celery.utils.timeutils import maybe_timedelta
-from celery.backends.base import BaseBackend
-
-from .models import Task, TaskSet
+from .models import Task
+from .models import TaskSet
from .session import ResultSession
+logger = logging.getLogger(__name__)
+
__all__ = ['DatabaseBackend']
@@ -33,7 +37,21 @@ def _sqlalchemy_installed():
return sqlalchemy
_sqlalchemy_installed()
-from sqlalchemy.exc import DatabaseError, OperationalError
+from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError
+from sqlalchemy.orm.exc import StaleDataError
+
+
+@contextmanager
+def session_cleanup(session):
+ try:
+ yield
+ except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError):
+ session.rollback()
+ session.connection().invalidate()
+ session.close()
+ raise
+ else:
+ session.close()
def retry(fun):
@@ -45,7 +63,12 @@ def _inner(*args, **kwargs):
for retries in range(max_retries):
try:
return fun(*args, **kwargs)
- except (DatabaseError, OperationalError):
+ except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError):
+ logger.critical(
+ "Failed operation %s. Retrying %s more times.",
+ fun.__name__, max_retries - retries - 1,
+ exc_info=True,
+ )
if retries + 1 >= max_retries:
raise
@@ -95,8 +118,9 @@ def _store_result(self, task_id, result, status,
traceback=None, max_retries=3, **kwargs):
"""Store return value and status of an executed task."""
session = self.ResultSession()
- try:
- task = session.query(Task).filter(Task.task_id == task_id).first()
+ with session_cleanup(session):
+ task = list(session.query(Task).filter(Task.task_id == task_id))
+ task = task and task[0]
if not task:
task = Task(task_id)
session.add(task)
@@ -106,83 +130,70 @@ def _store_result(self, task_id, result, status,
task.traceback = traceback
session.commit()
return result
- finally:
- session.close()
@retry
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
session = self.ResultSession()
- try:
- task = session.query(Task).filter(Task.task_id == task_id).first()
- if task is None:
+ with session_cleanup(session):
+ task = list(session.query(Task).filter(Task.task_id == task_id))
+ task = task and task[0]
+ if not task:
task = Task(task_id)
task.status = states.PENDING
task.result = None
return task.to_dict()
- finally:
- session.close()
@retry
def _save_group(self, group_id, result):
"""Store the result of an executed group."""
session = self.ResultSession()
- try:
+ with session_cleanup(session):
group = TaskSet(group_id, result)
session.add(group)
session.flush()
session.commit()
return result
- finally:
- session.close()
@retry
def _restore_group(self, group_id):
"""Get metadata for group by id."""
session = self.ResultSession()
- try:
+ with session_cleanup(session):
group = session.query(TaskSet).filter(
TaskSet.taskset_id == group_id).first()
if group:
return group.to_dict()
- finally:
- session.close()
@retry
def _delete_group(self, group_id):
"""Delete metadata for group by id."""
session = self.ResultSession()
- try:
+ with session_cleanup(session):
session.query(TaskSet).filter(
TaskSet.taskset_id == group_id).delete()
session.flush()
session.commit()
- finally:
- session.close()
@retry
def _forget(self, task_id):
"""Forget about result."""
session = self.ResultSession()
- try:
+ with session_cleanup(session):
session.query(Task).filter(Task.task_id == task_id).delete()
session.commit()
- finally:
- session.close()
def cleanup(self):
"""Delete expired metadata."""
session = self.ResultSession()
expires = self.expires
now = self.app.now()
- try:
+ with session_cleanup(session):
session.query(Task).filter(
Task.date_done < (now - expires)).delete()
session.query(TaskSet).filter(
TaskSet.date_done < (now - expires)).delete()
session.commit()
- finally:
- session.close()
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
From 688c3fe2eeacd4c3e9e6bce765f306bd44ea7786 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Sun, 2 Mar 2014 18:18:00 +0200
Subject: [PATCH 0014/1103] Don't close the session. Just rollback and/or
close.
---
celery/backends/database/__init__.py | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/celery/backends/database/__init__.py b/celery/backends/database/__init__.py
index dea6c833a..ffcb6113b 100644
--- a/celery/backends/database/__init__.py
+++ b/celery/backends/database/__init__.py
@@ -37,7 +37,7 @@ def _sqlalchemy_installed():
return sqlalchemy
_sqlalchemy_installed()
-from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError
+from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError, InvalidRequestError
from sqlalchemy.orm.exc import StaleDataError
@@ -45,12 +45,10 @@ def _sqlalchemy_installed():
def session_cleanup(session):
try:
yield
- except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError):
+ except Exception:
session.rollback()
- session.connection().invalidate()
- session.close()
raise
- else:
+ finally:
session.close()
@@ -63,8 +61,8 @@ def _inner(*args, **kwargs):
for retries in range(max_retries):
try:
return fun(*args, **kwargs)
- except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError):
- logger.critical(
+ except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError, InvalidRequestError):
+ logger.warning(
"Failed operation %s. Retrying %s more times.",
fun.__name__, max_retries - retries - 1,
exc_info=True,
From 7be5028d9abc94b70f16a4ee29beb78876718f66 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Mon, 3 Mar 2014 22:02:57 +0200
Subject: [PATCH 0015/1103] Close the sessions before disposing the engines as
the engines won't close connections held up in sessions.
---
celery/backends/database/session.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
index fef3843e4..cb25b5c95 100644
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -29,10 +29,12 @@ class _after_fork(object):
def __call__(self):
self.registered = False # child must reregister
+ for session in _SESSIONS:
+ session.close()
+ _SESSIONS.clear()
for engine in list(_ENGINES.values()):
engine.dispose()
_ENGINES.clear()
- _SESSIONS.clear()
after_fork = _after_fork()
From 6e8ab99dc94c9461fc433763c9a693dac19b188a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Tue, 4 Mar 2014 00:03:01 +0200
Subject: [PATCH 0016/1103] Use a session manager that has different behavior
before the fork (effectively hardcodes NullPool - everything else is
unreliable).
---
celery/backends/database/__init__.py | 13 ++--
celery/backends/database/session.py | 97 +++++++++++++---------------
2 files changed, 54 insertions(+), 56 deletions(-)
diff --git a/celery/backends/database/__init__.py b/celery/backends/database/__init__.py
index ffcb6113b..1dbcb5e32 100644
--- a/celery/backends/database/__init__.py
+++ b/celery/backends/database/__init__.py
@@ -20,7 +20,7 @@
from .models import Task
from .models import TaskSet
-from .session import ResultSession
+from .session import SessionManager
logger = logging.getLogger(__name__)
@@ -37,7 +37,7 @@ def _sqlalchemy_installed():
return sqlalchemy
_sqlalchemy_installed()
-from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError, InvalidRequestError
+from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError, InvalidRequestError, IntegrityError
from sqlalchemy.orm.exc import StaleDataError
@@ -61,7 +61,10 @@ def _inner(*args, **kwargs):
for retries in range(max_retries):
try:
return fun(*args, **kwargs)
- except (DatabaseError, OperationalError, ResourceClosedError, StaleDataError, InvalidRequestError):
+ except (
+ DatabaseError, OperationalError, ResourceClosedError, StaleDataError, InvalidRequestError,
+ IntegrityError
+ ):
logger.warning(
"Failed operation %s. Retrying %s more times.",
fun.__name__, max_retries - retries - 1,
@@ -104,8 +107,8 @@ def __init__(self, dburi=None, expires=None,
'Missing connection string! Do you have '
'CELERY_RESULT_DBURI set to a real value?')
- def ResultSession(self):
- return ResultSession(
+ def ResultSession(self, session_manager=SessionManager()):
+ return session_manager.session_factory(
dburi=self.dburi,
short_lived_sessions=self.short_lived_sessions,
**self.engine_options
diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
index cb25b5c95..1575d7f32 100644
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -8,60 +8,55 @@
"""
from __future__ import absolute_import
-from collections import defaultdict
-from multiprocessing.util import register_after_fork
+from billiard.util import register_after_fork
from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import NullPool
ResultModelBase = declarative_base()
-_SETUP = defaultdict(lambda: False)
-_ENGINES = {}
-_SESSIONS = {}
-
-__all__ = ['ResultSession', 'get_engine', 'create_session']
-
-
-class _after_fork(object):
- registered = False
-
- def __call__(self):
- self.registered = False # child must reregister
- for session in _SESSIONS:
- session.close()
- _SESSIONS.clear()
- for engine in list(_ENGINES.values()):
- engine.dispose()
- _ENGINES.clear()
-after_fork = _after_fork()
-
-
-def get_engine(dburi, **kwargs):
- try:
- return _ENGINES[dburi]
- except KeyError:
- engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
- after_fork.registered = True
- register_after_fork(after_fork, after_fork)
- return engine
-
-
-def create_session(dburi, short_lived_sessions=False, **kwargs):
- engine = get_engine(dburi, **kwargs)
- if short_lived_sessions or dburi not in _SESSIONS:
- _SESSIONS[dburi] = sessionmaker(bind=engine)
- return engine, _SESSIONS[dburi]
-
-
-def setup_results(engine):
- if not _SETUP['results']:
- ResultModelBase.metadata.create_all(engine)
- _SETUP['results'] = True
-
-
-def ResultSession(dburi, **kwargs):
- engine, session = create_session(dburi, **kwargs)
- setup_results(engine)
- return session()
+__all__ = ['SessionManager']
+
+
+class SessionManager(object):
+ def __init__(self):
+ self._engines = {}
+ self._sessions = {}
+ self.forked = False
+ self.prepared = False
+ register_after_fork(self, self._after_fork)
+
+ def _after_fork(self,):
+ self.forked = True
+
+ def get_engine(self, dburi, **kwargs):
+ if self.forked:
+ try:
+ return self._engines[dburi]
+ except KeyError:
+ engine = self._engines[dburi] = create_engine(dburi, **kwargs)
+ return engine
+ else:
+ kwargs['poolclass'] = NullPool
+ return create_engine(dburi, **kwargs)
+
+ def create_session(self, dburi, short_lived_sessions=False, **kwargs):
+ engine = self.get_engine(dburi, **kwargs)
+ if self.forked:
+ if short_lived_sessions or dburi not in self._sessions:
+ self._sessions[dburi] = sessionmaker(bind=engine)
+ return engine, self._sessions[dburi]
+ else:
+ return engine, sessionmaker(bind=engine)
+
+ def prepare_models(self, engine):
+ if not self.prepared:
+ ResultModelBase.metadata.create_all(engine)
+ self.prepared = True
+
+ def session_factory(self, dburi, **kwargs):
+ engine, session = self.create_session(dburi, **kwargs)
+ self.prepare_models(engine)
+ return session()
From df32c477e667e980a24e0c84f74dfb8546771e47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Sat, 22 Mar 2014 23:09:26 +0200
Subject: [PATCH 0017/1103] Update changelog.
---
Changelog | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/Changelog b/Changelog
index 9ecc78129..aa55bdc7f 100644
--- a/Changelog
+++ b/Changelog
@@ -19,6 +19,12 @@ new in Celery 3.1.
- Now depends on :ref:`Kombu 3.0.14 `.
+- **Results**:
+
+ Reliability improvements to the SQLAlchemy database backend. Previously the
+ connection from the MainProcess was improperly shared with the workers.
+ (Issue #1786)
+
- **Redis:** Important note about events (Issue #1882).
There is a new transport option for Redis that enables monitors
From 59fa886071eedb4991c892784c6fb26898efdfd3 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 21:33:16 +0000
Subject: [PATCH 0018/1103] Cosmetics for #1736
---
celery/backends/database/__init__.py | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/celery/backends/database/__init__.py b/celery/backends/database/__init__.py
index 1dbcb5e32..c52e75879 100644
--- a/celery/backends/database/__init__.py
+++ b/celery/backends/database/__init__.py
@@ -37,7 +37,7 @@ def _sqlalchemy_installed():
return sqlalchemy
_sqlalchemy_installed()
-from sqlalchemy.exc import DatabaseError, OperationalError, ResourceClosedError, InvalidRequestError, IntegrityError
+from sqlalchemy.exc import DatabaseError, InvalidRequestError
from sqlalchemy.orm.exc import StaleDataError
@@ -61,10 +61,7 @@ def _inner(*args, **kwargs):
for retries in range(max_retries):
try:
return fun(*args, **kwargs)
- except (
- DatabaseError, OperationalError, ResourceClosedError, StaleDataError, InvalidRequestError,
- IntegrityError
- ):
+ except (DatabaseError, InvalidRequestError, StaleDataError):
logger.warning(
"Failed operation %s. Retrying %s more times.",
fun.__name__, max_retries - retries - 1,
From 5ff19addac2992de2ed6a46f307cfcfb7543a384 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 21:39:20 +0000
Subject: [PATCH 0019/1103] flakes
---
celery/tests/backends/test_database.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/tests/backends/test_database.py b/celery/tests/backends/test_database.py
index fac02215e..6b5bf9420 100644
--- a/celery/tests/backends/test_database.py
+++ b/celery/tests/backends/test_database.py
@@ -42,16 +42,16 @@ def setup(self):
self.uri = 'sqlite:///test.db'
def test_retry_helper(self):
- from celery.backends.database import OperationalError
+ from celery.backends.database import DatabaseError
calls = [0]
@retry
def raises():
calls[0] += 1
- raise OperationalError(1, 2, 3)
+ raise DatabaseError(1, 2, 3)
- with self.assertRaises(OperationalError):
+ with self.assertRaises(DatabaseError):
raises(max_retries=5)
self.assertEqual(calls[0], 5)
From 15b531a9dcc18a85aa8e6488024f3be9330285e1 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 21:39:33 +0000
Subject: [PATCH 0020/1103] Stress test app now loads on Windows
---
funtests/stress/stress/app.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/funtests/stress/stress/app.py b/funtests/stress/stress/app.py
index e3d72da5e..0a9690cfb 100644
--- a/funtests/stress/stress/app.py
+++ b/funtests/stress/stress/app.py
@@ -91,7 +91,7 @@ def exiting(status=0):
@app.task
-def kill(sig=signal.SIGKILL):
+def kill(sig=getattr(signal, 'SIGKILL', None) or signal.SIGTERM):
os.kill(os.getpid(), sig)
From 620828c26b874fd2c357cd7f8ba4d4ec1dadd0a4 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Sat, 22 Mar 2014 21:43:16 +0000
Subject: [PATCH 0021/1103] Sets release date for 3.1.10
---
Changelog | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/Changelog b/Changelog
index aa55bdc7f..62abdf694 100644
--- a/Changelog
+++ b/Changelog
@@ -12,19 +12,13 @@ new in Celery 3.1.
3.1.10
======
-:release-date: 2014-XX-XX XX:XX X.X UTC
-:release-by: XX
+:release-date: 2014-03-22 09:40 P.M UTC
+:release-by: Ask Solem
- **Requirements**:
- Now depends on :ref:`Kombu 3.0.14 `.
-- **Results**:
-
- Reliability improvements to the SQLAlchemy database backend. Previously the
- connection from the MainProcess was improperly shared with the workers.
- (Issue #1786)
-
- **Redis:** Important note about events (Issue #1882).
There is a new transport option for Redis that enables monitors
@@ -50,7 +44,7 @@ new in Celery 3.1.
This means that the global result cache can finally be disabled,
and you can do so by setting :setting:`CELERY_MAX_CACHED_RESULTS` to
:const:`-1`. The lifetime of the cache will then be bound to the
- lifetime of the result object, and this will be the default behavior
+ lifetime of the result object, which will be the default behavior
in Celery 3.2.
- **Events**: The "Substantial drift" warning message is now logged once
@@ -76,7 +70,7 @@ new in Celery 3.1.
with workers and clients not using it, so be sure to enable
the option in all clients and workers if you decide to use it.
-- **Multi**: With ``-opt:index`` (e.g. ``-c:1``) the index now always refers
+- **Multi**: With ``-opt:index`` (e.g. :option:`-c:1`) the index now always refers
to the position of a node in the argument list.
This means that referring to a number will work when specifying a list
@@ -114,6 +108,14 @@ new in Celery 3.1.
- **Canvas**: A chord task raising an exception will now result in
any errbacks (``link_error``) to the chord callback to also be called.
+- **Results**: Reliability improvements to the SQLAlchemy database backend
+ (Issue #1786).
+
+ Previously the connection from the ``MainProcess`` was improperly
+ inherited by child processes.
+
+ Fix contributed by Ionel Cristian Mărieș.
+
- **Task**: Task callbacks and errbacks are now called using the group
primitive.
From c353c804f12efa3bd861bbc96e3972bc26be5e49 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Sun, 23 Mar 2014 00:39:34 +0200
Subject: [PATCH 0022/1103] Fix issue with timer starting after stop() is
called. Now it doesn't start in this situation.
---
celery/utils/timer2.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/utils/timer2.py b/celery/utils/timer2.py
index d462c6574..e42660c23 100644
--- a/celery/utils/timer2.py
+++ b/celery/utils/timer2.py
@@ -86,8 +86,8 @@ def run(self):
os._exit(1)
def stop(self):
+ self._is_shutdown.set()
if self.running:
- self._is_shutdown.set()
self._is_stopped.wait()
self.join(THREAD_TIMEOUT_MAX)
self.running = False
From e58d4bd6c9614005651c0d3ee69b298be17dbb03 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Mon, 24 Mar 2014 14:08:54 +0200
Subject: [PATCH 0023/1103] Match the signature used in the rest of the code.
---
celery/concurrency/base.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/concurrency/base.py b/celery/concurrency/base.py
index 6b3594a96..29c348d6a 100644
--- a/celery/concurrency/base.py
+++ b/celery/concurrency/base.py
@@ -111,7 +111,7 @@ def on_hard_timeout(self, job):
def maintain_pool(self, *args, **kwargs):
pass
- def terminate_job(self, pid):
+ def terminate_job(self, pid, signal=None):
raise NotImplementedError(
'{0} does not implement kill_job'.format(type(self)))
From 252a31939acff6196cc983b1b1244074879845e1 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 24 Mar 2014 15:46:53 +0000
Subject: [PATCH 0024/1103] Result cache populated by join_native even if cache
disabled
---
celery/backends/base.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 2ca4cc001..2a40f0dc9 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -57,8 +57,9 @@ def unpickle_backend(cls, args, kwargs):
class _nulldict(dict):
- def __setitem__(self, k, v):
+ def ignore(self, *a, **kw):
pass
+ __setitem__ = update = setdefault = ignore
class BaseBackend(object):
From 0f23e858bdc61f8864720a780ac992b34b1dfda9 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 24 Mar 2014 16:00:20 +0000
Subject: [PATCH 0025/1103] Stresstests: Delete results after bigtasksbigvalue
test
---
funtests/stress/stress/suite.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/funtests/stress/stress/suite.py b/funtests/stress/stress/suite.py
index f2fb02120..c88ec095e 100755
--- a/funtests/stress/stress/suite.py
+++ b/funtests/stress/stress/suite.py
@@ -246,7 +246,11 @@ def _evil_groupmember(self, evil_t, *eargs, **opts):
def bigtasksbigvalue(self):
g = group(any_returning.s(BIG, sleep=0.3) for i in range(8))
r = g()
- self.join(r, timeout=10)
+ try:
+ self.join(r, timeout=10)
+ finally:
+ # very big values so remove results from backend
+ r.forget()
def bigtasks(self, wait=None):
self._revoketerm(wait, False, False, BIG)
From 8d60d4039760084af2c711726ca81a7a31464dfe Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 25 Mar 2014 11:49:12 +0000
Subject: [PATCH 0026/1103] Task: Do not send error emails for expected errors
(@task(throws=...))
---
celery/worker/job.py | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/celery/worker/job.py b/celery/worker/job.py
index b277520e3..72946d0d3 100644
--- a/celery/worker/job.py
+++ b/celery/worker/job.py
@@ -454,11 +454,13 @@ def _log_error(self, einfo, send_failed_event=True):
)
task = self.task
if task.throws and isinstance(eobj, task.throws):
- severity, exc_info = logging.INFO, None
- description = 'raised expected'
+ do_send_mail, severity, exc_info, description = (
+ False, logging.INFO, None, 'raised expected',
+ )
else:
- severity = logging.ERROR
- description = 'raised unexpected'
+ do_send_mail, severity, description = (
+ True, logging.ERROR, 'raised unexpected',
+ )
format = self.error_msg
if send_failed_event:
self.send_event(
@@ -505,7 +507,8 @@ def _log_error(self, einfo, send_failed_event=True):
'hostname': self.hostname,
'internal': internal}})
- task.send_error_email(context, einfo.exception)
+ if do_send_mail:
+ task.send_error_email(context, einfo.exception)
def acknowledge(self):
"""Acknowledge task."""
From 84006b2405f48bf61785a608e938be13204fc204 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 26 Mar 2014 13:51:04 +0000
Subject: [PATCH 0027/1103] Removes duplicate _set_cache. Closes #1940
---
celery/result.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/celery/result.py b/celery/result.py
index 069d8fde8..3ea837c05 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -328,7 +328,6 @@ def _get_task_meta(self):
if meta:
state = meta['status']
if state == states.SUCCESS or state in states.PROPAGATE_STATES:
- self._set_cache(meta)
return self._set_cache(meta)
return meta
return self._cache
From 36bdbde7c6af6845918fb8ed7ad7c911ba628772 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 14:01:18 +0000
Subject: [PATCH 0028/1103] Stresstests: Backend may not implement .forget
---
funtests/stress/stress/suite.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/funtests/stress/stress/suite.py b/funtests/stress/stress/suite.py
index c88ec095e..3d2f91cb8 100755
--- a/funtests/stress/stress/suite.py
+++ b/funtests/stress/stress/suite.py
@@ -250,7 +250,10 @@ def bigtasksbigvalue(self):
self.join(r, timeout=10)
finally:
# very big values so remove results from backend
- r.forget()
+ try:
+ r.forget()
+ except NotImplementedError:
+ pass
def bigtasks(self, wait=None):
self._revoketerm(wait, False, False, BIG)
From 61906fdde36b9014917c3b0f5e7cf55e3822b24a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 14:08:26 +0000
Subject: [PATCH 0029/1103] Master branch is now 3.2a1
---
README.rst | 2 +-
celery/__init__.py | 4 ++--
docs/includes/introduction.txt | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.rst b/README.rst
index 2ec057fce..8e349b866 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@
.. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png
-:Version: 3.1.10 (Cipater)
+:Version: 3.2.0a1 (Cipater)
:Web: http://celeryproject.org/
:Download: http://pypi.python.org/pypi/celery/
:Source: http://github.com/celery/celery/
diff --git a/celery/__init__.py b/celery/__init__.py
index 489b6b4e1..848907cf3 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -13,8 +13,8 @@
'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'),
)
-SERIES = 'Cipater'
-VERSION = version_info_t(3, 1, 10, '', '')
+SERIES = 'DEV'
+VERSION = version_info_t(3, 2, 0, 'a1', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index c96304ff1..4cbc2627f 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 3.1.10 (Cipater)
+:Version: 3.2.0a1 (Cipater)
:Web: http://celeryproject.org/
:Download: http://pypi.python.org/pypi/celery/
:Source: http://github.com/celery/celery/
From 59ece7d5f21f1ccc67263053c641c03648baebad Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 14:58:47 +0000
Subject: [PATCH 0030/1103] Stresstests: Refactors Suite into BaseSuite and
Suite
---
funtests/stress/stress/suite.py | 164 +++++++++++++++++++-------------
1 file changed, 98 insertions(+), 66 deletions(-)
diff --git a/funtests/stress/stress/suite.py b/funtests/stress/stress/suite.py
index 3d2f91cb8..10d56f4cf 100755
--- a/funtests/stress/stress/suite.py
+++ b/funtests/stress/stress/suite.py
@@ -1,11 +1,12 @@
from __future__ import absolute_import, print_function, unicode_literals
+import inspect
import platform
import random
import socket
import sys
-from collections import namedtuple
+from collections import defaultdict, namedtuple
from itertools import count
from time import sleep
@@ -13,14 +14,14 @@
from celery import group, VERSION_BANNER
from celery.exceptions import TimeoutError
-from celery.five import range, values, monotonic
+from celery.five import items, monotonic, range, values
from celery.utils.debug import blockdetection
from celery.utils.text import pluralize, truncate
from celery.utils.timeutils import humanize_seconds
from .app import (
marker, _marker, add, any_, exiting, kill, sleeping,
- sleeping_ignore_limits, segfault, any_returning,
+ sleeping_ignore_limits, any_returning,
)
from .data import BIG, SMALL
from .fbi import FBI
@@ -83,7 +84,7 @@ def testgroup(*funs):
return OrderedDict((fun.__name__, fun) for fun in funs)
-class Suite(object):
+class BaseSuite(object):
def __init__(self, app, block_timeout=30 * 60):
self.app = app
@@ -92,30 +93,26 @@ def __init__(self, app, block_timeout=30 * 60):
self.progress = None
self.speaker = Speaker()
self.fbi = FBI(app)
+ self.init_groups()
- self.groups = {
- 'all': testgroup(
- self.manyshort,
- self.termbysig,
- self.bigtasks,
- self.bigtasksbigvalue,
- self.smalltasks,
- self.timelimits,
- self.timelimits_soft,
- self.revoketermfast,
- self.revoketermslow,
- self.alwayskilled,
- self.alwaysexits,
- ),
- 'green': testgroup(
- self.manyshort,
- self.bigtasks,
- self.bigtasksbigvalue,
- self.smalltasks,
- self.alwaysexits,
- self.group_with_exit,
- ),
- }
+ def init_groups(self):
+ acc = defaultdict(list)
+ for attr in dir(self):
+ if not _is_descriptor(self, attr):
+ meth = getattr(self, attr)
+ try:
+ groups = meth.__func__.__testgroup__
+ except AttributeError:
+ pass
+ else:
+ for group in groups:
+ acc[group].append(meth)
+ # sort the tests by the order in which they are defined in the class
+ for group in values(acc):
+ group[:] = sorted(group, key=lambda m: m.__func__.__testsort__)
+ self.groups = dict(
+ (name, testgroup(*tests)) for name, tests in items(acc)
+ )
def run(self, names=None, iterations=50, offset=0,
numtests=None, list_all=False, repeat=0, group='all',
@@ -167,10 +164,6 @@ def banner(self, tests):
total=len(tests),
)
- def manyshort(self):
- self.join(group(add.s(i, i) for i in range(1000))(),
- timeout=10, propagate=True)
-
def runtest(self, fun, n=50, index=0, repeats=1):
print('{0}: [[[{1}({2})]]]'.format(repeats, fun.__name__, n))
with blockdetection(self.block_timeout):
@@ -211,26 +204,92 @@ def runtest(self, fun, n=50, index=0, repeats=1):
fun, i + 1, n, index, repeats, runtime, elapsed, 1,
)
+ def missing_results(self, r):
+ return [res.id for res in r if res.id not in res.backend._cache]
+
+ def join(self, r, propagate=False, max_retries=10, **kwargs):
+ if self.no_join:
+ return
+ received = []
+
+ def on_result(task_id, value):
+ received.append(task_id)
+
+ for i in range(max_retries) if max_retries else count(0):
+ received[:] = []
+ try:
+ return r.get(callback=on_result, propagate=propagate, **kwargs)
+ except (socket.timeout, TimeoutError) as exc:
+ waiting_for = self.missing_results(r)
+ self.speaker.beep()
+ marker(
+ 'Still waiting for {0}/{1}: [{2}]: {3!r}'.format(
+ len(r) - len(received), len(r),
+ truncate(', '.join(waiting_for)), exc), '!',
+ )
+ self.fbi.diag(waiting_for)
+ except self.connerrors as exc:
+ self.speaker.beep()
+ marker('join: connection lost: {0!r}'.format(exc), '!')
+ raise StopSuite('Test failed: Missing task results')
+
+ def dump_progress(self):
+ return pstatus(self.progress) if self.progress else 'No test running'
+
+
+_creation_counter = count(0)
+def testcase(*groups):
+ if not groups:
+ raise ValueError('@testcase requires at least one group name')
+
+ def _mark_as_case(fun):
+ fun.__testgroup__ = groups
+ fun.__testsort__ = next(_creation_counter)
+ return fun
+
+ return _mark_as_case
+
+
+def _is_descriptor(obj, attr):
+ try:
+ cattr = getattr(obj.__class__, attr)
+ except AttributeError:
+ pass
+ else:
+ return not inspect.ismethod(cattr) and hasattr(cattr, '__get__')
+ return False
+
+
+class Suite(BaseSuite):
+
+ @testcase('all', 'green')
+ def manyshort(self):
+ self.join(group(add.s(i, i) for i in range(1000))(),
+ timeout=10, propagate=True)
+
+ @testcase('all')
def termbysig(self):
self._evil_groupmember(kill)
+ @testcase('green')
def group_with_exit(self):
self._evil_groupmember(exiting)
- def termbysegfault(self):
- self._evil_groupmember(segfault)
-
+ @testcase('all')
def timelimits(self):
self._evil_groupmember(sleeping, 2, time_limit=1)
+ @testcase('all')
def timelimits_soft(self):
self._evil_groupmember(sleeping_ignore_limits, 2,
soft_time_limit=1, time_limit=1.1)
+ @testcase('all')
def alwayskilled(self):
g = group(kill.s() for _ in range(10))
self.join(g(), timeout=10)
+ @testcase('all', 'green')
def alwaysexits(self):
g = group(exiting.s() for _ in range(10))
self.join(g(), timeout=10)
@@ -243,6 +302,7 @@ def _evil_groupmember(self, evil_t, *eargs, **opts):
self.join(g1(), timeout=10)
self.join(g2(), timeout=10)
+ @testcase('all', 'green')
def bigtasksbigvalue(self):
g = group(any_returning.s(BIG, sleep=0.3) for i in range(8))
r = g()
@@ -255,15 +315,19 @@ def bigtasksbigvalue(self):
except NotImplementedError:
pass
+ @testcase('all', 'green')
def bigtasks(self, wait=None):
self._revoketerm(wait, False, False, BIG)
+ @testcase('all', 'green')
def smalltasks(self, wait=None):
self._revoketerm(wait, False, False, SMALL)
+ @testcase('all')
def revoketermfast(self, wait=None):
self._revoketerm(wait, True, False, SMALL)
+ @testcase('all')
def revoketermslow(self, wait=5):
self._revoketerm(wait, True, True, BIG)
@@ -276,35 +340,3 @@ def _revoketerm(self, wait=None, terminate=True,
sleep(random.choice(range(4)))
r.revoke(terminate=True)
self.join(r, timeout=10)
-
- def missing_results(self, r):
- return [res.id for res in r if res.id not in res.backend._cache]
-
- def join(self, r, propagate=False, max_retries=10, **kwargs):
- if self.no_join:
- return
- received = []
-
- def on_result(task_id, value):
- received.append(task_id)
-
- for i in range(max_retries) if max_retries else count(0):
- received[:] = []
- try:
- return r.get(callback=on_result, propagate=propagate, **kwargs)
- except (socket.timeout, TimeoutError) as exc:
- waiting_for = self.missing_results(r)
- self.speaker.beep()
- marker(
- 'Still waiting for {0}/{1}: [{2}]: {3!r}'.format(
- len(r) - len(received), len(r),
- truncate(', '.join(waiting_for)), exc), '!',
- )
- self.fbi.diag(waiting_for)
- except self.connerrors as exc:
- self.speaker.beep()
- marker('join: connection lost: {0!r}'.format(exc), '!')
- raise StopSuite('Test failed: Missing task results')
-
- def dump_progress(self):
- return pstatus(self.progress) if self.progress else 'No test running'
From 8b7e3f2e9a7e91b055885e14b90ccb0167871b98 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 15:36:42 +0000
Subject: [PATCH 0031/1103] TaskProducer replaced by create_task_message and
send_task_message
---
celery/app/amqp.py | 411 +++++++++++-------------
celery/app/base.py | 25 +-
celery/app/task.py | 2 +-
celery/beat.py | 2 +-
celery/contrib/migrate.py | 4 +-
celery/five.py | 1 -
celery/task/base.py | 15 +-
celery/task/sets.py | 2 +-
celery/tests/app/test_amqp.py | 96 +-----
celery/tests/app/test_app.py | 35 +-
celery/tests/backends/test_amqp.py | 6 +-
celery/tests/tasks/test_tasks.py | 5 -
docs/reference/celery.app.amqp.rst | 19 +-
examples/eventlet/bulk_task_producer.py | 26 +-
14 files changed, 267 insertions(+), 382 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index b0dae95e0..c5b253396 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -10,13 +10,14 @@
import numbers
+from collections import Mapping, namedtuple
from datetime import timedelta
from weakref import WeakValueDictionary
from kombu import Connection, Consumer, Exchange, Producer, Queue
from kombu.common import Broadcast
from kombu.pools import ProducerPool
-from kombu.utils import cached_property, uuid
+from kombu.utils import cached_property
from kombu.utils.encoding import safe_repr
from kombu.utils.functional import maybe_list
@@ -25,10 +26,9 @@
from celery.utils.text import indent as textindent
from celery.utils.timeutils import to_utc
-from . import app_or_default
from . import routes as _routes
-__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer']
+__all__ = ['AMQP', 'Queues', 'task_message']
#: Human readable queue declaration.
QUEUE_FORMAT = """
@@ -36,6 +36,9 @@
key={0.routing_key}
"""
+task_message = namedtuple('task_message',
+ ('headers', 'properties', 'body', 'sent_event'))
+
class Queues(dict):
"""Queue name⇒ declaration mapping.
@@ -184,204 +187,14 @@ def consume_from(self):
return self
-class TaskProducer(Producer):
- app = None
- auto_declare = False
- retry = False
- retry_policy = None
- utc = True
- event_dispatcher = None
- send_sent_event = False
-
- def __init__(self, channel=None, exchange=None, *args, **kwargs):
- self.retry = kwargs.pop('retry', self.retry)
- self.retry_policy = kwargs.pop('retry_policy',
- self.retry_policy or {})
- self.send_sent_event = kwargs.pop('send_sent_event',
- self.send_sent_event)
- exchange = exchange or self.exchange
- self.queues = self.app.amqp.queues # shortcut
- self.default_queue = self.app.amqp.default_queue
- super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs)
-
- def publish_task(self, task_name, task_args=None, task_kwargs=None,
- countdown=None, eta=None, task_id=None, group_id=None,
- taskset_id=None, # compat alias to group_id
- expires=None, exchange=None, exchange_type=None,
- event_dispatcher=None, retry=None, retry_policy=None,
- queue=None, now=None, retries=0, chord=None,
- callbacks=None, errbacks=None, routing_key=None,
- serializer=None, delivery_mode=None, compression=None,
- reply_to=None, time_limit=None, soft_time_limit=None,
- declare=None, headers=None,
- send_before_publish=signals.before_task_publish.send,
- before_receivers=signals.before_task_publish.receivers,
- send_after_publish=signals.after_task_publish.send,
- after_receivers=signals.after_task_publish.receivers,
- send_task_sent=signals.task_sent.send, # XXX deprecated
- sent_receivers=signals.task_sent.receivers,
- **kwargs):
- """Send task message."""
- retry = self.retry if retry is None else retry
- headers = {} if headers is None else headers
-
- qname = queue
- if queue is None and exchange is None:
- queue = self.default_queue
- if queue is not None:
- if isinstance(queue, string_t):
- qname, queue = queue, self.queues[queue]
- else:
- qname = queue.name
- exchange = exchange or queue.exchange.name
- routing_key = routing_key or queue.routing_key
- if declare is None and queue and not isinstance(queue, Broadcast):
- declare = [queue]
-
- # merge default and custom policy
- retry = self.retry if retry is None else retry
- _rp = (dict(self.retry_policy, **retry_policy) if retry_policy
- else self.retry_policy)
- task_id = task_id or uuid()
- task_args = task_args or []
- task_kwargs = task_kwargs or {}
- if not isinstance(task_args, (list, tuple)):
- raise ValueError('task args must be a list or tuple')
- if not isinstance(task_kwargs, dict):
- raise ValueError('task kwargs must be a dictionary')
- if countdown: # Convert countdown to ETA.
- now = now or self.app.now()
- eta = now + timedelta(seconds=countdown)
- if self.utc:
- eta = to_utc(eta).astimezone(self.app.timezone)
- if isinstance(expires, numbers.Real):
- now = now or self.app.now()
- expires = now + timedelta(seconds=expires)
- if self.utc:
- expires = to_utc(expires).astimezone(self.app.timezone)
- eta = eta and eta.isoformat()
- expires = expires and expires.isoformat()
-
- body = {
- 'task': task_name,
- 'id': task_id,
- 'args': task_args,
- 'kwargs': task_kwargs,
- 'retries': retries or 0,
- 'eta': eta,
- 'expires': expires,
- 'utc': self.utc,
- 'callbacks': callbacks,
- 'errbacks': errbacks,
- 'timelimit': (time_limit, soft_time_limit),
- 'taskset': group_id or taskset_id,
- 'chord': chord,
- }
-
- if before_receivers:
- send_before_publish(
- sender=task_name, body=body,
- exchange=exchange,
- routing_key=routing_key,
- declare=declare,
- headers=headers,
- properties=kwargs,
- retry_policy=retry_policy,
- )
-
- self.publish(
- body,
- exchange=exchange, routing_key=routing_key,
- serializer=serializer or self.serializer,
- compression=compression or self.compression,
- headers=headers,
- retry=retry, retry_policy=_rp,
- reply_to=reply_to,
- correlation_id=task_id,
- delivery_mode=delivery_mode, declare=declare,
- **kwargs
- )
-
- if after_receivers:
- send_after_publish(sender=task_name, body=body,
- exchange=exchange, routing_key=routing_key)
-
- if sent_receivers: # XXX deprecated
- send_task_sent(sender=task_name, task_id=task_id,
- task=task_name, args=task_args,
- kwargs=task_kwargs, eta=eta,
- taskset=group_id or taskset_id)
- if self.send_sent_event:
- evd = event_dispatcher or self.event_dispatcher
- exname = exchange or self.exchange
- if isinstance(exname, Exchange):
- exname = exname.name
- evd.publish(
- 'task-sent',
- {
- 'uuid': task_id,
- 'name': task_name,
- 'args': safe_repr(task_args),
- 'kwargs': safe_repr(task_kwargs),
- 'retries': retries,
- 'eta': eta,
- 'expires': expires,
- 'queue': qname,
- 'exchange': exname,
- 'routing_key': routing_key,
- },
- self, retry=retry, retry_policy=retry_policy,
- )
- return task_id
- delay_task = publish_task # XXX Compat
-
- @cached_property
- def event_dispatcher(self):
- # We call Dispatcher.publish with a custom producer
- # so don't need the dispatcher to be "enabled".
- return self.app.events.Dispatcher(enabled=False)
-
-
-class TaskPublisher(TaskProducer):
- """Deprecated version of :class:`TaskProducer`."""
-
- def __init__(self, channel=None, exchange=None, *args, **kwargs):
- self.app = app_or_default(kwargs.pop('app', self.app))
- self.retry = kwargs.pop('retry', self.retry)
- self.retry_policy = kwargs.pop('retry_policy',
- self.retry_policy or {})
- exchange = exchange or self.exchange
- if not isinstance(exchange, Exchange):
- exchange = Exchange(exchange,
- kwargs.pop('exchange_type', 'direct'))
- self.queues = self.app.amqp.queues # shortcut
- super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
-
-
-class TaskConsumer(Consumer):
- app = None
-
- def __init__(self, channel, queues=None, app=None, accept=None, **kw):
- self.app = app or self.app
- if accept is None:
- accept = self.app.conf.CELERY_ACCEPT_CONTENT
- super(TaskConsumer, self).__init__(
- channel,
- queues or list(self.app.amqp.queues.consume_from.values()),
- accept=accept,
- **kw
- )
-
-
class AMQP(object):
Connection = Connection
Consumer = Consumer
+ Producer = Producer
#: compat alias to Connection
BrokerConnection = Connection
- producer_cls = TaskProducer
- consumer_cls = TaskConsumer
queues_cls = Queues
#: Cached and prepared routing table.
@@ -400,6 +213,18 @@ class AMQP(object):
def __init__(self, app):
self.app = app
+ @cached_property
+ def _task_retry(self):
+ return self.app.conf.CELERY_TASK_PUBLISH_RETRY
+
+ @cached_property
+ def _task_retry_policy(self):
+ return self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
+
+ @cached_property
+ def _task_sent_event(self):
+ return self.app.conf.CELERY_SEND_TASK_SENT_EVENT
+
def flush_routes(self):
self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
@@ -429,35 +254,14 @@ def Router(self, queues=None, create_missing=None):
self.app.either('CELERY_CREATE_MISSING_QUEUES',
create_missing), app=self.app)
- @cached_property
- def TaskConsumer(self):
- """Return consumer configured to consume from the queues
- we are configured for (``app.amqp.queues.consume_from``)."""
- return self.app.subclass_with_self(self.consumer_cls,
- reverse='amqp.TaskConsumer')
- get_task_consumer = TaskConsumer # XXX compat
-
- @cached_property
- def TaskProducer(self):
- """Return publisher used to send tasks.
-
- You should use `app.send_task` instead.
-
- """
- conf = self.app.conf
- return self.app.subclass_with_self(
- self.producer_cls,
- reverse='amqp.TaskProducer',
- exchange=self.default_exchange,
- routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
- serializer=conf.CELERY_TASK_SERIALIZER,
- compression=conf.CELERY_MESSAGE_COMPRESSION,
- retry=conf.CELERY_TASK_PUBLISH_RETRY,
- retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY,
- send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT,
- utc=conf.CELERY_ENABLE_UTC,
+ def TaskConsumer(self, channel, queues=None, accept=None, **kw):
+ if accept is None:
+ accept = self.app.conf.CELERY_ACCEPT_CONTENT
+ return self.Consumer(
+ channel, accept=accept,
+ queues=queues or list(self.queues.consume_from.values()),
+ **kw
)
- TaskPublisher = TaskProducer # compat
@cached_property
def default_queue(self):
@@ -488,7 +292,7 @@ def producer_pool(self):
self._producer_pool = ProducerPool(
self.app.pool,
limit=self.app.pool.limit,
- Producer=self.TaskProducer,
+ Producer=self.Producer,
)
return self._producer_pool
publisher_pool = producer_pool # compat alias
@@ -497,3 +301,164 @@ def producer_pool(self):
def default_exchange(self):
return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
+
+ def create_task_message(self, task_id, name, args=None, kwargs=None,
+ countdown=None, eta=None, group_id=None,
+ expires=None, now=None, retries=0, chord=None,
+ callbacks=None, errbacks=None, reply_to=None,
+ time_limit=None, soft_time_limit=None,
+ create_sent_event=False):
+ args = args or ()
+ kwargs = kwargs or {}
+ utc = self.utc
+ if not isinstance(args, (list, tuple)):
+ raise ValueError('task args must be a list or tuple')
+ if not isinstance(kwargs, Mapping):
+ raise ValueError('task keyword arguments must be a mapping')
+ if countdown: # convert countdown to ETA
+ now = now or self.app.now()
+ eta = now + timedelta(seconds=countdown)
+ if utc:
+ eta = to_utc(eta).astimezone(self.app.timezone)
+ if isinstance(expires, numbers.Real):
+ now = now or self.app.now()
+ expires = now + timedelta(seconds=expires)
+ if utc:
+ expires = to_utc(expires).astimezone(self.app.timezone)
+ eta = eta and eta.isoformat()
+ expires = expires and expires.isoformat()
+
+ return task_message(
+ {},
+ {
+ 'correlation_id': task_id,
+ 'reply_to': reply_to,
+ },
+ {
+ 'task': name,
+ 'id': task_id,
+ 'args': args,
+ 'kwargs': kwargs,
+ 'retries': retries,
+ 'eta': eta,
+ 'expires': expires,
+ 'utc': utc,
+ 'callbacks': callbacks,
+ 'errbacks': errbacks,
+ 'timelimit': (time_limit, soft_time_limit),
+ 'taskset': group_id,
+ 'chord': chord,
+ },
+ {
+ 'uuid': task_id,
+ 'name': name,
+ 'args': safe_repr(args),
+ 'kwargs': safe_repr(kwargs),
+ 'retries': retries,
+ 'eta': eta,
+ 'expires': expires,
+ } if create_sent_event else None,
+ )
+
+ def _create_task_sender(self):
+ default_retry = self.app.conf.CELERY_TASK_PUBLISH_RETRY
+ default_policy = self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
+ default_queue = self.default_queue
+ queues = self.queues
+ send_before_publish = signals.before_task_publish.send
+ before_receivers = signals.before_task_publish.receivers
+ send_after_publish = signals.after_task_publish.send
+ after_receivers = signals.after_task_publish.receivers
+
+ send_task_sent = signals.task_sent.send # XXX compat
+ sent_receivers = signals.task_sent.receivers
+
+ default_evd = self._event_dispatcher
+ default_exchange = self.default_exchange
+
+ default_rkey = self.app.conf.CELERY_DEFAULT_ROUTING_KEY
+ default_serializer = self.app.conf.CELERY_TASK_SERIALIZER
+ default_compressor = self.app.conf.CELERY_MESSAGE_COMPRESSION
+
+ def publish_task(producer, name, message,
+ exchange=None, routing_key=None, queue=None,
+ event_dispatcher=None, retry=None, retry_policy=None,
+ serializer=None, delivery_mode=None,
+ compression=None, declare=None,
+ headers=None, **kwargs):
+ retry = default_retry if retry is None else retry
+ headers, properties, body, sent_event = message
+ if kwargs:
+ properties.update(kwargs)
+
+ qname = queue
+ if queue is None and exchange is None:
+ queue = default_queue
+ if queue is not None:
+ if isinstance(queue, string_t):
+ qname, queue = queue, queues[queue]
+ else:
+ qname = queue.name
+ exchange = exchange or queue.exchange.name
+ routing_key = routing_key or queue.routing_key
+ if declare is None and queue and not isinstance(queue, Broadcast):
+ declare = [queue]
+
+ # merge default and custom policy
+ retry = default_retry if retry is None else retry
+ _rp = (dict(default_policy, **retry_policy) if retry_policy
+ else default_policy)
+
+ if before_receivers:
+ send_before_publish(
+ sender=name, body=body,
+ exchange=exchange, routing_key=routing_key,
+ declare=declare, headers=headers,
+ properties=kwargs, retry_policy=retry_policy,
+ )
+ ret = producer.publish(
+ body,
+ exchange=exchange or default_exchange,
+ routing_key=routing_key or default_rkey,
+ serializer=serializer or default_serializer,
+ compression=compression or default_compressor,
+ retry=retry, retry_policy=_rp,
+ delivery_mode=delivery_mode, declare=declare,
+ headers=headers,
+ **properties
+ )
+ if after_receivers:
+ send_after_publish(sender=name, body=body,
+ exchange=exchange, routing_key=routing_key)
+ if sent_receivers: # XXX deprecated
+ send_task_sent(sender=name, task_id=body['id'], task=name,
+ args=body['args'], kwargs=body['kwargs'],
+ eta=body['eta'], taskset=body['taskset'])
+ if sent_event:
+ evd = event_dispatcher or default_evd
+ exname = exchange or self.exchange
+ if isinstance(name, Exchange):
+ exname = exname.name
+ sent_event.update({
+ 'queue': qname,
+ 'exchange': exname,
+ 'routing_key': routing_key,
+ })
+ evd.publish('task-sent', sent_event,
+ self, retry=retry, retry_policy=retry_policy)
+ return ret
+ return publish_task
+
+ @cached_property
+ def send_task_message(self):
+ return self._create_task_sender()
+
+ @cached_property
+ def utc(self):
+ return self.app.conf.CELERY_ENABLE_UTC
+
+ @cached_property
+ def _event_dispatcher(self):
+ # We call Dispatcher.publish with a custom producer
+ # so don't need the dispatcher to be enabled.
+ return self.app.events.Dispatcher(enabled=False)
diff --git a/celery/app/base.py b/celery/app/base.py
index 153a5575a..c934a7a94 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -302,26 +302,33 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
eta=None, task_id=None, producer=None, connection=None,
router=None, result_cls=None, expires=None,
publisher=None, link=None, link_error=None,
- add_to_parent=True, reply_to=None, **options):
+ add_to_parent=True, group_id=None, retries=0, chord=None,
+ reply_to=None, time_limit=None, soft_time_limit=None,
+ **options):
+ amqp = self.amqp
task_id = task_id or uuid()
producer = producer or publisher # XXX compat
- router = router or self.amqp.router
+ router = router or amqp.router
conf = self.conf
if conf.CELERY_ALWAYS_EAGER: # pragma: no cover
warnings.warn(AlwaysEagerIgnored(
'CELERY_ALWAYS_EAGER has no effect on send_task',
), stacklevel=2)
options = router.route(options, name, args, kwargs)
+
+ message = amqp.create_task_message(
+ task_id, name, args, kwargs, countdown, eta, group_id,
+ expires, retries, chord,
+ maybe_list(link), maybe_list(link_error),
+ reply_to or self.oid, time_limit, soft_time_limit,
+ self.conf.CELERY_SEND_TASK_SENT_EVENT,
+ )
+
if connection:
- producer = self.amqp.TaskProducer(connection)
+ producer = amqp.Producer(connection)
with self.producer_or_acquire(producer) as P:
self.backend.on_task_call(P, task_id)
- task_id = P.publish_task(
- name, args, kwargs, countdown=countdown, eta=eta,
- task_id=task_id, expires=expires,
- callbacks=maybe_list(link), errbacks=maybe_list(link_error),
- reply_to=reply_to or self.oid, **options
- )
+ amqp.send_task_message(P, name, message, **options)
result = (result_cls or self.AsyncResult)(task_id)
if add_to_parent:
parent = get_current_worker_task()
diff --git a/celery/app/task.py b/celery/app/task.py
index 79f6d3fca..dcb32a34d 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -525,7 +525,7 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
:keyword link_error: A single, or a list of tasks to apply
if an error occurs while executing the task.
- :keyword producer: :class:~@amqp.TaskProducer` instance to use.
+ :keyword producer: :class:`~kombu.Producer` instance to use.
:keyword add_to_parent: If set to True (default) and the task
is applied while executing another task, then the result
will be appended to the parent tasks ``request.children``
diff --git a/celery/beat.py b/celery/beat.py
index 8205c2781..0b2ec97f2 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -179,7 +179,7 @@ def __init__(self, app, schedule=None, max_interval=None,
self.sync_every_tasks = (
app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
else sync_every_tasks)
- self.Publisher = Publisher or app.amqp.TaskProducer
+ self.Publisher = Publisher or app.amqp.Producer
if not lazy:
self.setup_schedule()
diff --git a/celery/contrib/migrate.py b/celery/contrib/migrate.py
index e4a10e9b9..c50fba2c7 100644
--- a/celery/contrib/migrate.py
+++ b/celery/contrib/migrate.py
@@ -99,7 +99,7 @@ def migrate_tasks(source, dest, migrate=migrate_task, app=None,
queues=None, **kwargs):
app = app_or_default(app)
queues = prepare_queues(queues)
- producer = app.amqp.TaskProducer(dest)
+ producer = app.amqp.Producer(dest)
migrate = partial(migrate, producer, queues=queues)
def on_declare_queue(queue):
@@ -186,7 +186,7 @@ def transform(value):
app = app_or_default(app)
queues = [_maybe_queue(app, queue) for queue in source or []] or None
with app.connection_or_acquire(connection, pool=False) as conn:
- producer = app.amqp.TaskProducer(conn)
+ producer = app.amqp.Producer(conn)
state = State()
def on_task(body, message):
diff --git a/celery/five.py b/celery/five.py
index dfee614e4..77ec1daa5 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -238,7 +238,6 @@ def _compat_periodic_task_decorator(*args, **kwargs):
'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
},
'messaging': {
- 'TaskPublisher': 'amqp.TaskPublisher',
'TaskConsumer': 'amqp.TaskConsumer',
'establish_connection': 'connection',
'get_consumer_set': 'amqp.TaskConsumer',
diff --git a/celery/task/base.py b/celery/task/base.py
index 9d466b57c..9e12d4f8c 100644
--- a/celery/task/base.py
+++ b/celery/task/base.py
@@ -106,12 +106,19 @@ def get_publisher(self, connection=None, exchange=None,
exchange_type=None, **options):
"""Deprecated method to get the task publisher (now called producer).
- Should be replaced with :class:`@amqp.TaskProducer`:
+ Should be replaced with :class:`@kombu.Producer`:
.. code-block:: python
- with celery.connection() as conn:
- with celery.amqp.TaskProducer(conn) as prod:
+ with app.connection() as conn:
+ with app.amqp.Producer(conn) as prod:
+ my_task.apply_async(producer=prod)
+
+ or even better is to use the :class:`@amqp.producer_pool`:
+
+ .. code-block:: python
+
+ with app.producer_or_acquire() as prod:
my_task.apply_async(producer=prod)
"""
@@ -119,7 +126,7 @@ def get_publisher(self, connection=None, exchange=None,
if exchange_type is None:
exchange_type = self.exchange_type
connection = connection or self.establish_connection()
- return self._get_app().amqp.TaskProducer(
+ return self._get_app().amqp.Producer(
connection,
exchange=exchange and Exchange(exchange, exchange_type),
routing_key=self.routing_key, **options
diff --git a/celery/task/sets.py b/celery/task/sets.py
index e277b796d..7d4355f62 100644
--- a/celery/task/sets.py
+++ b/celery/task/sets.py
@@ -46,7 +46,7 @@ def __init__(self, tasks=None, app=None, Publisher=None):
super(TaskSet, self).__init__(
maybe_signature(t, app=self.app) for t in tasks or []
)
- self.Publisher = Publisher or self.app.amqp.TaskProducer
+ self.Publisher = Publisher or self.app.amqp.Producer
self.total = len(self) # XXX compat
def apply_async(self, connection=None, publisher=None, taskset_id=None):
diff --git a/celery/tests/app/test_amqp.py b/celery/tests/app/test_amqp.py
index efb398ac6..cf2810d5b 100644
--- a/celery/tests/app/test_amqp.py
+++ b/celery/tests/app/test_amqp.py
@@ -1,86 +1,10 @@
from __future__ import absolute_import
-import datetime
-
-import pytz
-
from kombu import Exchange, Queue
-from celery.app.amqp import Queues, TaskPublisher
+from celery.app.amqp import Queues
from celery.five import keys
-from celery.tests.case import AppCase, Mock
-
-
-class test_TaskProducer(AppCase):
-
- def test__exit__(self):
- publisher = self.app.amqp.TaskProducer(self.app.connection())
- publisher.release = Mock()
- with publisher:
- pass
- publisher.release.assert_called_with()
-
- def test_declare(self):
- publisher = self.app.amqp.TaskProducer(self.app.connection())
- publisher.exchange.name = 'foo'
- publisher.declare()
- publisher.exchange.name = None
- publisher.declare()
-
- def test_retry_policy(self):
- prod = self.app.amqp.TaskProducer(Mock())
- prod.channel.connection.client.declared_entities = set()
- prod.publish_task('tasks.add', (2, 2), {},
- retry_policy={'frobulate': 32.4})
-
- def test_publish_no_retry(self):
- prod = self.app.amqp.TaskProducer(Mock())
- prod.channel.connection.client.declared_entities = set()
- prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123)
- self.assertFalse(prod.connection.ensure.call_count)
-
- def test_publish_custom_queue(self):
- prod = self.app.amqp.TaskProducer(Mock())
- self.app.amqp.queues['some_queue'] = Queue(
- 'xxx', Exchange('yyy'), 'zzz',
- )
- prod.channel.connection.client.declared_entities = set()
- prod.publish = Mock()
- prod.publish_task('tasks.add', (8, 8), {}, retry=False,
- queue='some_queue')
- self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy')
- self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz')
-
- def test_publish_with_countdown(self):
- prod = self.app.amqp.TaskProducer(Mock())
- prod.channel.connection.client.declared_entities = set()
- prod.publish = Mock()
- now = datetime.datetime(2013, 11, 26, 16, 48, 46)
- prod.publish_task('tasks.add', (1, 1), {}, retry=False,
- countdown=10, now=now)
- self.assertEqual(
- prod.publish.call_args[0][0]['eta'],
- '2013-11-26T16:48:56+00:00',
- )
-
- def test_publish_with_countdown_and_timezone(self):
- # use timezone with fixed offset to be sure it won't be changed
- self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120)
- prod = self.app.amqp.TaskProducer(Mock())
- prod.channel.connection.client.declared_entities = set()
- prod.publish = Mock()
- now = datetime.datetime(2013, 11, 26, 16, 48, 46)
- prod.publish_task('tasks.add', (2, 2), {}, retry=False,
- countdown=20, now=now)
- self.assertEqual(
- prod.publish.call_args[0][0]['eta'],
- '2013-11-26T18:49:06+02:00',
- )
-
- def test_event_dispatcher(self):
- prod = self.app.amqp.TaskProducer(Mock())
- self.assertTrue(prod.event_dispatcher)
- self.assertFalse(prod.event_dispatcher.enabled)
+from celery.tests.case import AppCase
class test_TaskConsumer(AppCase):
@@ -98,22 +22,6 @@ def test_accept_content(self):
)
-class test_compat_TaskPublisher(AppCase):
-
- def test_compat_exchange_is_string(self):
- producer = TaskPublisher(exchange='foo', app=self.app)
- self.assertIsInstance(producer.exchange, Exchange)
- self.assertEqual(producer.exchange.name, 'foo')
- self.assertEqual(producer.exchange.type, 'direct')
- producer = TaskPublisher(exchange='foo', exchange_type='topic',
- app=self.app)
- self.assertEqual(producer.exchange.type, 'topic')
-
- def test_compat_exchange_is_Exchange(self):
- producer = TaskPublisher(exchange=Exchange('foo'), app=self.app)
- self.assertEqual(producer.exchange.name, 'foo')
-
-
class test_PublisherPool(AppCase):
def test_setup_nolimit(self):
diff --git a/celery/tests/app/test_app.py b/celery/tests/app/test_app.py
index 113dedae1..36cdb67a2 100644
--- a/celery/tests/app/test_app.py
+++ b/celery/tests/app/test_app.py
@@ -8,7 +8,6 @@
from pickle import loads, dumps
from amqp import promise
-from kombu import Exchange
from celery import shared_task, current_app
from celery import app as _app
@@ -336,10 +335,13 @@ def test_apply_async_has__self__(self):
def aawsX():
pass
- with patch('celery.app.amqp.TaskProducer.publish_task') as dt:
- aawsX.apply_async((4, 5))
- args = dt.call_args[0][1]
- self.assertEqual(args, ('hello', 4, 5))
+ with patch('celery.app.amqp.AMQP.create_task_message') as create:
+ with patch('celery.app.amqp.AMQP.send_task_message') as send:
+ create.return_value = Mock(), Mock(), Mock(), Mock()
+ aawsX.apply_async((4, 5))
+ args = create.call_args[0][2]
+ self.assertEqual(args, ('hello', 4, 5))
+ self.assertTrue(send.called)
def test_apply_async_adds_children(self):
from celery._state import _task_stack
@@ -609,22 +611,23 @@ def publish(self, type, fields, *args, **kwargs):
chan.close()
assert conn.transport_cls == 'memory'
- prod = self.app.amqp.TaskProducer(
- conn, exchange=Exchange('foo_exchange'),
- send_sent_event=True,
+ message = self.app.amqp.create_task_message(
+ 'id', 'footask', (), {}, create_sent_event=True,
)
+ prod = self.app.amqp.Producer(conn)
dispatcher = Dispatcher()
- self.assertTrue(prod.publish_task('footask', (), {},
- exchange='moo_exchange',
- routing_key='moo_exchange',
- event_dispatcher=dispatcher))
+ self.app.amqp.send_task_message(
+ prod, 'footask', message,
+ exchange='moo_exchange', routing_key='moo_exchange',
+ event_dispatcher=dispatcher,
+ )
self.assertTrue(dispatcher.sent)
self.assertEqual(dispatcher.sent[0][0], 'task-sent')
- self.assertTrue(prod.publish_task('footask', (), {},
- event_dispatcher=dispatcher,
- exchange='bar_exchange',
- routing_key='bar_exchange'))
+ self.app.amqp.send_task_message(
+ prod, 'footask', message, event_dispatcher=dispatcher,
+ exchange='bar_exchange', routing_key='bar_exchange',
+ )
def test_error_mail_sender(self):
x = ErrorMail.subject % {'name': 'task_name',
diff --git a/celery/tests/backends/test_amqp.py b/celery/tests/backends/test_amqp.py
index 7e5a36196..23a6c46d7 100644
--- a/celery/tests/backends/test_amqp.py
+++ b/celery/tests/backends/test_amqp.py
@@ -108,8 +108,8 @@ def publish(*args, **kwargs):
raise KeyError('foo')
backend = AMQPBackend(self.app)
- from celery.app.amqp import TaskProducer
- prod, TaskProducer.publish = TaskProducer.publish, publish
+ from celery.app.amqp import Producer
+ prod, Producer.publish = Producer.publish, publish
try:
with self.assertRaises(KeyError):
backend.retry_policy['max_retries'] = None
@@ -119,7 +119,7 @@ def publish(*args, **kwargs):
backend.retry_policy['max_retries'] = 10
backend.store_result('foo', 'bar', 'STARTED')
finally:
- TaskProducer.publish = prod
+ Producer.publish = prod
def assertState(self, retval, state):
self.assertEqual(retval['status'], state)
diff --git a/celery/tests/tasks/test_tasks.py b/celery/tests/tasks/test_tasks.py
index 93a782ecc..c01ffc16a 100644
--- a/celery/tests/tasks/test_tasks.py
+++ b/celery/tests/tasks/test_tasks.py
@@ -381,11 +381,6 @@ def test_after_return(self):
finally:
self.mytask.pop_request()
- def test_send_task_sent_event(self):
- with self.app.connection() as conn:
- self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True
- self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event)
-
def test_update_state(self):
@self.app.task(shared=False)
diff --git a/docs/reference/celery.app.amqp.rst b/docs/reference/celery.app.amqp.rst
index 467552820..5257acdbf 100644
--- a/docs/reference/celery.app.amqp.rst
+++ b/docs/reference/celery.app.amqp.rst
@@ -17,7 +17,11 @@
.. attribute:: Consumer
- Base Consumer class used. Default is :class:`kombu.compat.Consumer`.
+ Base Consumer class used. Default is :class:`kombu.Consumer`.
+
+ .. attribute:: Producer
+
+ Base Producer class used. Default is :class:`kombu.Producer`.
.. attribute:: queues
@@ -25,13 +29,13 @@
.. automethod:: Queues
.. automethod:: Router
- .. autoattribute:: TaskConsumer
- .. autoattribute:: TaskProducer
.. automethod:: flush_routes
+ .. automethod:: create_task_message
+ .. autoattribute:: send_task_message
.. autoattribute:: default_queue
.. autoattribute:: default_exchange
- .. autoattribute:: publisher_pool
+ .. autoattribute:: producer_pool
.. autoattribute:: router
.. autoattribute:: routes
@@ -41,10 +45,3 @@
.. autoclass:: Queues
:members:
:undoc-members:
-
- TaskPublisher
- -------------
-
- .. autoclass:: TaskPublisher
- :members:
- :undoc-members:
diff --git a/examples/eventlet/bulk_task_producer.py b/examples/eventlet/bulk_task_producer.py
index 2002160c0..4bc75a215 100644
--- a/examples/eventlet/bulk_task_producer.py
+++ b/examples/eventlet/bulk_task_producer.py
@@ -3,8 +3,6 @@
from eventlet.queue import LightQueue
from eventlet.event import Event
-from celery import current_app
-
monkey_patch()
@@ -27,9 +25,16 @@ def wait(self, timeout=None):
class ProducerPool(object):
+ """Usage::
+
+ >>> app = Celery(broker='amqp://')
+ >>> ProducerPool(app)
+
+ """
Receipt = Receipt
- def __init__(self, size=20):
+ def __init__(self, app, size=20):
+ self.app = app
self.size = size
self.inqueue = LightQueue()
self._running = None
@@ -48,13 +53,12 @@ def _run(self):
]
def _producer(self):
- connection = current_app.connection()
- publisher = current_app.amqp.TaskProducer(connection)
inqueue = self.inqueue
- while 1:
- task, args, kwargs, options, receipt = inqueue.get()
- result = task.apply_async(args, kwargs,
- publisher=publisher,
- **options)
- receipt.finished(result)
+ with self.app.producer_or_acquire() as producer:
+ while 1:
+ task, args, kwargs, options, receipt = inqueue.get()
+ result = task.apply_async(args, kwargs,
+ producer=producer,
+ **options)
+ receipt.finished(result)
From f9e49a8f7ae0ad349705ee9b2ea79787b9e65d83 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 16:18:54 +0000
Subject: [PATCH 0032/1103] 3.2: Use dict and set comprehensions (+ literals)
---
celery/app/amqp.py | 7 +-
celery/app/defaults.py | 2 +-
celery/app/registry.py | 4 +-
celery/app/task.py | 7 +-
celery/app/utils.py | 8 +-
celery/backends/base.py | 16 ++--
celery/backends/cache.py | 2 +-
celery/beat.py | 7 +-
celery/bin/base.py | 7 +-
celery/bin/celery.py | 8 +-
celery/bin/graph.py | 2 +-
celery/bin/multi.py | 6 +-
celery/concurrency/asynpool.py | 15 ++--
celery/contrib/migrate.py | 2 +-
celery/datastructures.py | 6 +-
celery/events/__init__.py | 4 +-
celery/events/state.py | 25 +++---
celery/five.py | 21 +++--
celery/result.py | 6 +-
celery/schedules.py | 4 +-
celery/task/http.py | 10 +--
celery/tests/app/test_amqp.py | 4 +-
celery/tests/app/test_schedules.py | 100 +++++++++++-----------
celery/tests/concurrency/test_prefork.py | 18 ++--
celery/tests/utils/test_datastructures.py | 2 +-
celery/tests/worker/test_control.py | 4 +-
celery/utils/__init__.py | 13 +--
celery/utils/functional.py | 5 +-
celery/worker/__init__.py | 9 +-
celery/worker/autoreload.py | 13 +--
celery/worker/consumer.py | 4 +-
celery/worker/control.py | 24 +++---
celery/worker/job.py | 4 +-
33 files changed, 189 insertions(+), 180 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index c5b253396..995171e6e 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -65,7 +65,7 @@ def __init__(self, queues=None, default_exchange=None,
self.ha_policy = ha_policy
self.autoexchange = Exchange if autoexchange is None else autoexchange
if isinstance(queues, (tuple, list)):
- queues = dict((q.name, q) for q in queues)
+ queues = {q.name: q for q in queues}
for name, q in items(queues or {}):
self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q)
@@ -156,8 +156,9 @@ def select(self, include):
Can be iterable or string.
"""
if include:
- self._consume_from = dict((name, self[name])
- for name in maybe_list(include))
+ self._consume_from = {
+ name: self[name] for name in maybe_list(include)
+ }
select_subset = select # XXX compat
def deselect(self, exclude):
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index 15f7fcfb6..a9cc79914 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -233,7 +233,7 @@ def flatten(d, ns=''):
stack.append((name + key + '_', value))
else:
yield name + key, value
-DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
+DEFAULTS = {key: value.default for key, value in flatten(NAMESPACES)}
def find_deprecated_settings(source):
diff --git a/celery/app/registry.py b/celery/app/registry.py
index 7046554d9..ce7b398e3 100644
--- a/celery/app/registry.py
+++ b/celery/app/registry.py
@@ -57,8 +57,8 @@ def periodic(self):
return self.filter_types('periodic')
def filter_types(self, type):
- return dict((name, task) for name, task in items(self)
- if getattr(task, 'type', 'regular') == type)
+ return {name: task for name, task in items(self)
+ if getattr(task, 'type', 'regular') == type}
def _unpickle_task(name):
diff --git a/celery/app/task.py b/celery/app/task.py
index dcb32a34d..48a5b2be2 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -724,9 +724,10 @@ def apply(self, args=None, kwargs=None,
'loglevel': options.get('loglevel', 0),
'delivery_info': {'is_eager': True}}
supported_keys = fun_takes_kwargs(task.run, default_kwargs)
- extend_with = dict((key, val)
- for key, val in items(default_kwargs)
- if key in supported_keys)
+ extend_with = {
+ key: val for key, val in items(default_kwargs)
+ if key in supported_keys
+ }
kwargs.update(extend_with)
tb = None
diff --git a/celery/app/utils.py b/celery/app/utils.py
index defdca7b8..ba5e1bb8b 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -117,11 +117,11 @@ def get_by_parts(self, *parts):
def table(self, with_defaults=False, censored=True):
filt = filter_hidden_settings if censored else lambda v: v
- return filt(dict(
- (k, v) for k, v in items(
+ return filt({
+ k: v for k, v in items(
self if with_defaults else self.without_defaults())
if k.isupper() and not k.startswith('_')
- ))
+ })
def humanize(self, with_defaults=False, censored=True):
"""Return a human readable string showing changes to the
@@ -182,7 +182,7 @@ def maybe_censor(key, value, mask='*' * 8):
return Connection(value).as_uri(mask=mask)
return value
- return dict((k, maybe_censor(k, v)) for k, v in items(conf))
+ return {k: maybe_censor(k, v) for k, v in items(conf)}
def bugreport(app):
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 2a40f0dc9..437dd4c83 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -435,14 +435,16 @@ def _strip_prefix(self, key):
def _mget_to_results(self, values, keys):
if hasattr(values, 'items'):
# client returns dict so mapping preserved.
- return dict((self._strip_prefix(k), self.decode(v))
- for k, v in items(values)
- if v is not None)
+ return {
+ self._strip_prefix(k): self.decode(v)
+ for k, v in items(values) if v is not None
+ }
else:
# client returns list so need to recreate mapping.
- return dict((bytes_to_str(keys[i]), self.decode(value))
- for i, value in enumerate(values)
- if value is not None)
+ return {
+ bytes_to_str(keys[i]): self.decode(value)
+ for i, value in enumerate(values) if value is not None
+ }
def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
READY_STATES=states.READY_STATES):
@@ -467,7 +469,7 @@ def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True,
r = self._mget_to_results(self.mget([self.get_key_for_task(k)
for k in keys]), keys)
cache.update(r)
- ids.difference_update(set(bytes_to_str(v) for v in r))
+ ids.difference_update({bytes_to_str(v) for v in r})
for key, value in items(r):
yield bytes_to_str(key), value
if timeout and iterations * interval >= timeout:
diff --git a/celery/backends/cache.py b/celery/backends/cache.py
index ac8710099..7062a001a 100644
--- a/celery/backends/cache.py
+++ b/celery/backends/cache.py
@@ -73,7 +73,7 @@ def get(self, key, *args, **kwargs):
def get_multi(self, keys):
cache = self.cache
- return dict((k, cache[k]) for k in keys if k in cache)
+ return {k: cache[k] for k in keys if k in cache}
def set(self, key, value, *args, **kwargs):
self.cache[key] = value
diff --git a/celery/beat.py b/celery/beat.py
index 0b2ec97f2..03f6b3a5d 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -295,9 +295,10 @@ def _maybe_entry(self, name, entry):
return self.Entry(**dict(entry, name=name, app=self.app))
def update_from_dict(self, dict_):
- self.schedule.update(dict(
- (name, self._maybe_entry(name, entry))
- for name, entry in items(dict_)))
+ self.schedule.update({
+ name: self._maybe_entry(name, entry)
+ for name, entry in items(dict_)
+ })
def merge_inplace(self, b):
schedule = self.schedule
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 9ad794665..79bdb5c8a 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -373,9 +373,10 @@ def handle_argv(self, prog_name, argv, command=None):
def prepare_args(self, options, args):
if options:
- options = dict((k, self.expanduser(v))
- for k, v in items(vars(options))
- if not k.startswith('_'))
+ options = {
+ k: self.expanduser(v)
+ for k, v in items(vars(options)) if not k.startswith('_')
+ }
args = [self.expanduser(arg) for arg in args]
self.check_args(args)
return options, args
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 10d7c0324..3d0cf5d8f 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -572,10 +572,10 @@ def run(self, force_ipython=False, force_bpython=False,
'signature': celery.signature}
if not without_tasks:
- self.locals.update(dict(
- (task.__name__, task) for task in values(self.app.tasks)
- if not task.name.startswith('celery.')),
- )
+ self.locals.update({
+ task.__name__: task for task in values(self.app.tasks)
+ if not task.name.startswith('celery.')
+ })
if force_python:
return self.invoke_fallback_shell()
diff --git a/celery/bin/graph.py b/celery/bin/graph.py
index 5d5847672..d8aa31187 100644
--- a/celery/bin/graph.py
+++ b/celery/bin/graph.py
@@ -34,7 +34,7 @@ def run(self, what=None, *args, **kwargs):
def bootsteps(self, *args, **kwargs):
worker = self.app.WorkController()
- include = set(arg.lower() for arg in args or ['worker', 'consumer'])
+ include = {arg.lower() for arg in args or ['worker', 'consumer']}
if 'worker' in include:
graph = worker.blueprint.graph
if 'consumer' in include:
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index ca14c0bfe..23ff496f7 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -116,9 +116,9 @@
__all__ = ['MultiTool']
-SIGNAMES = set(sig for sig in dir(signal)
- if sig.startswith('SIG') and '_' not in sig)
-SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
+SIGNAMES = {sig for sig in dir(signal)
+ if sig.startswith('SIG') and '_' not in sig}
+SIGMAP = {getattr(signal, name): name for name in SIGNAMES}
USAGE = """\
usage: {prog_name} start [worker options]
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 5c4d5855c..a3906c492 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -347,8 +347,9 @@ def __init__(self, processes=None, synack=False,
processes = self.cpu_count() if processes is None else processes
self.synack = synack
# create queue-pairs for all our processes in advance.
- self._queues = dict((self.create_process_queues(), None)
- for _ in range(processes))
+ self._queues = {
+ self.create_process_queues(): None for _ in range(processes)
+ }
# inqueue fileno -> process mapping
self._fileno_to_inq = {}
@@ -912,7 +913,7 @@ def flush(self):
self._busy_workers.clear()
def _flush_writer(self, proc, writer):
- fds = set([proc.inq._writer])
+ fds = {proc.inq._writer}
try:
while fds:
if not proc._is_alive():
@@ -941,9 +942,9 @@ def on_grow(self, n):
"""Grow the pool by ``n`` proceses."""
diff = max(self._processes - len(self._queues), 0)
if diff:
- self._queues.update(
- dict((self.create_process_queues(), None) for _ in range(diff))
- )
+ self._queues.update({
+ self.create_process_queues(): None for _ in range(diff)
+ })
def on_shrink(self, n):
"""Shrink the pool by ``n`` processes."""
@@ -1085,7 +1086,7 @@ def process_flush_queues(self, proc):
"""
resq = proc.outq._reader
on_state_change = self._result_handler.on_state_change
- fds = set([resq])
+ fds = {resq}
while fds and not resq.closed and self._state != TERMINATE:
readable, _, again = _select(fds, None, fds, timeout=0.01)
if readable:
diff --git a/celery/contrib/migrate.py b/celery/contrib/migrate.py
index c50fba2c7..c829cdb5a 100644
--- a/celery/contrib/migrate.py
+++ b/celery/contrib/migrate.py
@@ -250,7 +250,7 @@ def start_filter(app, conn, filter, limit=None, timeout=1.0,
if isinstance(tasks, string_t):
tasks = set(tasks.split(','))
if tasks is None:
- tasks = set([])
+ tasks = set()
def update_state(body, message):
state.count += 1
diff --git a/celery/datastructures.py b/celery/datastructures.py
index 9c36a3957..1551ab861 100644
--- a/celery/datastructures.py
+++ b/celery/datastructures.py
@@ -186,9 +186,9 @@ def topsort(self):
graph = DependencyGraph()
components = self._tarjan72()
- NC = dict((node, component)
- for component in components
- for node in component)
+ NC = {
+ node: component for component in components for node in component
+ }
for component in components:
graph.add_arc(component)
for node in self:
diff --git a/celery/events/__init__.py b/celery/events/__init__.py
index 931f3953e..b4ca9045c 100644
--- a/celery/events/__init__.py
+++ b/celery/events/__init__.py
@@ -112,7 +112,7 @@ class EventDispatcher(object):
You need to :meth:`close` this after use.
"""
- DISABLED_TRANSPORTS = set(['sql'])
+ DISABLED_TRANSPORTS = {'sql'}
app = None
@@ -300,7 +300,7 @@ def __init__(self, channel, handlers=None, routing_key='#',
self.adjust_clock = self.clock.adjust
self.forward_clock = self.clock.forward
if accept is None:
- accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json'])
+ accept = {self.app.conf.CELERY_EVENT_SERIALIZER, 'json'}
self.accept = accept
def _get_queue_arguments(self):
diff --git a/celery/events/state.py b/celery/events/state.py
index c78f2d08a..541f72226 100644
--- a/celery/events/state.py
+++ b/celery/events/state.py
@@ -222,7 +222,7 @@ def on_heartbeat(self, timestamp=None, local_received=None, **fields):
def _defaults(cls):
"""Deprecated, to be removed in 3.3"""
source = cls()
- return dict((k, getattr(source, k)) for k in cls._fields)
+ return {k: getattr(source, k) for k in cls._fields}
@with_unique_field('uuid')
@@ -295,9 +295,9 @@ def event(self, type_, timestamp=None, local_received=None, fields=None,
# this state logically happens-before the current state, so merge.
keep = self.merge_rules.get(state)
if keep is not None:
- fields = dict(
- (k, v) for k, v in items(fields) if k in keep
- )
+ fields = {
+ k: v for k, v in items(fields) if k in keep
+ }
for key, value in items(fields):
setattr(self, key, value)
else:
@@ -323,9 +323,9 @@ def __repr__(self):
def as_dict(self):
get = object.__getattribute__
- return dict(
- (k, get(self, k)) for k in self._fields
- )
+ return {
+ k: get(self, k) for k in self._fields
+ }
def __reduce__(self):
return _depickle_task, (self.__class__, self.as_dict())
@@ -379,7 +379,7 @@ def update(self, state, timestamp, fields,
def merge(self, state, timestamp, fields):
keep = self.merge_rules.get(state)
if keep is not None:
- fields = dict((k, v) for k, v in items(fields) if k in keep)
+ fields = {k: v for k, v in items(fields) if k in keep}
for key, value in items(fields):
setattr(self, key, value)
@@ -387,7 +387,7 @@ def merge(self, state, timestamp, fields):
def _defaults(cls):
"""Deprecated, to be removed in 3.3."""
source = cls()
- return dict((k, getattr(source, k)) for k in source._fields)
+ return {k: getattr(source, k) for k in source._fields}
class State(object):
@@ -436,9 +436,10 @@ def clear_tasks(self, ready=True):
def _clear_tasks(self, ready=True):
if ready:
- in_progress = dict(
- (uuid, task) for uuid, task in self.itertasks()
- if task.state not in states.READY_STATES)
+ in_progress = {
+ uuid: task for uuid, task in self.itertasks()
+ if task.state not in states.READY_STATES
+ }
self.tasks.clear()
self.tasks.update(in_progress)
else:
diff --git a/celery/five.py b/celery/five.py
index 77ec1daa5..99ecc28e3 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -127,7 +127,7 @@ def exec_(code, globs=None, locs=None): # pragma: no cover
exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
-def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
+def with_metaclass(Type, skip_attrs={'__dict__', '__weakref__'}):
"""Class decorator to set metaclass.
Works with both Python 2 and Python 3 and it does not add
@@ -137,8 +137,8 @@ def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])):
"""
def _clone_with_metaclass(Class):
- attrs = dict((key, value) for key, value in items(vars(Class))
- if key not in skip_attrs)
+ attrs = {key: value for key, value in items(vars(Class))
+ if key not in skip_attrs}
return Type(Class.__name__, Class.__bases__, attrs)
return _clone_with_metaclass
@@ -191,7 +191,7 @@ def format_d(i): # noqa
The module %s is deprecated and will be removed in a future version.
"""
-DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])
+DEFAULT_ATTRS = {'__file__', '__path__', '__doc__', '__all__'}
# im_func is no longer available in Py3.
# instead the unbound method itself can be used.
@@ -327,8 +327,10 @@ def create_module(name, attrs, cls_attrs=None, pkg=None,
pkg, _, modname = name.rpartition('.')
cls_attrs['__module__'] = pkg
- attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr)
- for attr_name, attr in items(attrs))
+ attrs = {
+ attr_name: (prepare_attr(attr) if prepare_attr else attr)
+ for attr_name, attr in items(attrs)
+ }
module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn)
module.__dict__.update(attrs)
return module
@@ -350,8 +352,9 @@ def recreate_module(name, compat_modules=(), by_module={}, direct={},
))),
)
new_module = create_module(name, attrs, cls_attrs=cattrs, base=base)
- new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod))
- for mod in compat_modules))
+ new_module.__dict__.update({
+ mod: get_compat_module(new_module, mod) for mod in compat_modules
+ })
return old_module, new_module
@@ -375,7 +378,7 @@ def prepare(attr):
def get_origins(defs):
origins = {}
for module, attrs in items(defs):
- origins.update(dict((attr, module) for attr in attrs))
+ origins.update({attr: module for attr in attrs})
return origins
diff --git a/celery/result.py b/celery/result.py
index 3ea837c05..901d01933 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -676,9 +676,9 @@ def join_native(self, timeout=None, propagate=True,
"""
assert_will_not_block()
- order_index = None if callback else dict(
- (result.id, i) for i, result in enumerate(self.results)
- )
+ order_index = None if callback else {
+ result.id: i for i, result in enumerate(self.results)
+ }
acc = None if callback else [None for _ in range(len(self))]
for task_id, meta in self.iter_native(timeout, interval, no_ack):
value = meta['result']
diff --git a/celery/schedules.py b/celery/schedules.py
index 6424dfa04..18cf48190 100644
--- a/celery/schedules.py
+++ b/celery/schedules.py
@@ -383,7 +383,7 @@ def _expand_cronspec(cronspec, max_, min_=0):
int (like 7)
str (like '3-5,*/15', '*', or 'monday')
- set (like set([0,15,30,45]))
+ set (like {0, 15, 30, 45})
list (like [8-17])
And convert it to an (expanded) set representing all time unit
@@ -403,7 +403,7 @@ def _expand_cronspec(cronspec, max_, min_=0):
"""
if isinstance(cronspec, numbers.Integral):
- result = set([cronspec])
+ result = {cronspec}
elif isinstance(cronspec, string_t):
result = crontab_parser(max_, min_).parse(cronspec)
elif isinstance(cronspec, set):
diff --git a/celery/task/http.py b/celery/task/http.py
index e170ec3a5..2c9d8604b 100644
--- a/celery/task/http.py
+++ b/celery/task/http.py
@@ -41,13 +41,13 @@ def utf8dict(tup):
from urllib2 import Request, urlopen # noqa
- def utf8dict(tup): # noqa
+ def utf8dict(tup, enc='utf-8'): # noqa
"""With a dict's items() tuple return a new dict with any utf-8
keys/values encoded."""
- return dict(
- (k.encode('utf-8'),
- v.encode('utf-8') if isinstance(v, unicode) else v) # noqa
- for k, v in tup)
+ return {
+ k.encode(enc): (v.encode(enc) if isinstance(v, unicode) else v)
+ for k, v in tup
+ }
class InvalidResponseError(Exception):
diff --git a/celery/tests/app/test_amqp.py b/celery/tests/app/test_amqp.py
index cf2810d5b..9ef9f572e 100644
--- a/celery/tests/app/test_amqp.py
+++ b/celery/tests/app/test_amqp.py
@@ -14,11 +14,11 @@ def test_accept_content(self):
self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json']
self.assertEqual(
self.app.amqp.TaskConsumer(conn).accept,
- set(['application/json'])
+ {'application/json'},
)
self.assertEqual(
self.app.amqp.TaskConsumer(conn, accept=['json']).accept,
- set(['application/json']),
+ {'application/json'},
)
diff --git a/celery/tests/app/test_schedules.py b/celery/tests/app/test_schedules.py
index 8cb3d6d4c..90f49125b 100644
--- a/celery/tests/app/test_schedules.py
+++ b/celery/tests/app/test_schedules.py
@@ -54,65 +54,63 @@ def test_parse_range(self):
def test_parse_range_wraps(self):
self.assertEqual(crontab_parser(12).parse('11-1'),
- set([11, 0, 1]))
+ {11, 0, 1})
self.assertEqual(crontab_parser(60, 1).parse('2-1'),
set(range(1, 60 + 1)))
def test_parse_groups(self):
self.assertEqual(crontab_parser().parse('1,2,3,4'),
- set([1, 2, 3, 4]))
+ {1, 2, 3, 4})
self.assertEqual(crontab_parser().parse('0,15,30,45'),
- set([0, 15, 30, 45]))
+ {0, 15, 30, 45})
self.assertEqual(crontab_parser(min_=1).parse('1,2,3,4'),
- set([1, 2, 3, 4]))
+ {1, 2, 3, 4})
def test_parse_steps(self):
self.assertEqual(crontab_parser(8).parse('*/2'),
- set([0, 2, 4, 6]))
+ {0, 2, 4, 6})
self.assertEqual(crontab_parser().parse('*/2'),
- set(i * 2 for i in range(30)))
+ {i * 2 for i in range(30)})
self.assertEqual(crontab_parser().parse('*/3'),
- set(i * 3 for i in range(20)))
+ {i * 3 for i in range(20)})
self.assertEqual(crontab_parser(8, 1).parse('*/2'),
- set([1, 3, 5, 7]))
+ {1, 3, 5, 7})
self.assertEqual(crontab_parser(min_=1).parse('*/2'),
- set(i * 2 + 1 for i in range(30)))
+ {i * 2 + 1 for i in range(30)})
self.assertEqual(crontab_parser(min_=1).parse('*/3'),
- set(i * 3 + 1 for i in range(20)))
+ {i * 3 + 1 for i in range(20)})
def test_parse_composite(self):
- self.assertEqual(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
- self.assertEqual(crontab_parser().parse('2-9/5'), set([2, 7]))
- self.assertEqual(crontab_parser().parse('2-10/5'), set([2, 7]))
+ self.assertEqual(crontab_parser(8).parse('*/2'), {0, 2, 4, 6})
+ self.assertEqual(crontab_parser().parse('2-9/5'), {2, 7})
+ self.assertEqual(crontab_parser().parse('2-10/5'), {2, 7})
self.assertEqual(
crontab_parser(min_=1).parse('55-5/3'),
- set([55, 58, 1, 4]),
+ {55, 58, 1, 4},
)
- self.assertEqual(crontab_parser().parse('2-11/5,3'), set([2, 3, 7]))
+ self.assertEqual(crontab_parser().parse('2-11/5,3'), {2, 3, 7})
self.assertEqual(
crontab_parser().parse('2-4/3,*/5,0-21/4'),
- set([0, 2, 4, 5, 8, 10, 12, 15, 16,
- 20, 25, 30, 35, 40, 45, 50, 55]),
+ {0, 2, 4, 5, 8, 10, 12, 15, 16, 20, 25, 30, 35, 40, 45, 50, 55},
)
self.assertEqual(
crontab_parser().parse('1-9/2'),
- set([1, 3, 5, 7, 9]),
+ {1, 3, 5, 7, 9},
)
- self.assertEqual(crontab_parser(8, 1).parse('*/2'), set([1, 3, 5, 7]))
- self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), set([2, 7]))
- self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), set([2, 7]))
+ self.assertEqual(crontab_parser(8, 1).parse('*/2'), {1, 3, 5, 7})
+ self.assertEqual(crontab_parser(min_=1).parse('2-9/5'), {2, 7})
+ self.assertEqual(crontab_parser(min_=1).parse('2-10/5'), {2, 7})
self.assertEqual(
crontab_parser(min_=1).parse('2-11/5,3'),
- set([2, 3, 7]),
+ {2, 3, 7},
)
self.assertEqual(
crontab_parser(min_=1).parse('2-4/3,*/5,1-21/4'),
- set([1, 2, 5, 6, 9, 11, 13, 16, 17,
- 21, 26, 31, 36, 41, 46, 51, 56]),
+ {1, 2, 5, 6, 9, 11, 13, 16, 17, 21, 26, 31, 36, 41, 46, 51, 56},
)
self.assertEqual(
crontab_parser(min_=1).parse('1-9/2'),
- set([1, 3, 5, 7, 9]),
+ {1, 3, 5, 7, 9},
)
def test_parse_errors_on_empty_string(self):
@@ -148,11 +146,11 @@ def test_parse_errors_on_gt_max(self):
def test_expand_cronspec_eats_iterables(self):
self.assertEqual(
crontab._expand_cronspec(iter([1, 2, 3]), 100),
- set([1, 2, 3]),
+ {1, 2, 3},
)
self.assertEqual(
crontab._expand_cronspec(iter([1, 2, 3]), 100, 1),
- set([1, 2, 3]),
+ {1, 2, 3},
)
def test_expand_cronspec_invalid_type(self):
@@ -408,7 +406,7 @@ def test_default_crontab_spec(self):
def test_simple_crontab_spec(self):
c = self.crontab(minute=30)
- self.assertEqual(c.minute, set([30]))
+ self.assertEqual(c.minute, {30})
self.assertEqual(c.hour, set(range(24)))
self.assertEqual(c.day_of_week, set(range(7)))
self.assertEqual(c.day_of_month, set(range(1, 32)))
@@ -416,13 +414,13 @@ def test_simple_crontab_spec(self):
def test_crontab_spec_minute_formats(self):
c = self.crontab(minute=30)
- self.assertEqual(c.minute, set([30]))
+ self.assertEqual(c.minute, {30})
c = self.crontab(minute='30')
- self.assertEqual(c.minute, set([30]))
+ self.assertEqual(c.minute, {30})
c = self.crontab(minute=(30, 40, 50))
- self.assertEqual(c.minute, set([30, 40, 50]))
- c = self.crontab(minute=set([30, 40, 50]))
- self.assertEqual(c.minute, set([30, 40, 50]))
+ self.assertEqual(c.minute, {30, 40, 50})
+ c = self.crontab(minute={30, 40, 50})
+ self.assertEqual(c.minute, {30, 40, 50})
def test_crontab_spec_invalid_minute(self):
with self.assertRaises(ValueError):
@@ -432,11 +430,11 @@ def test_crontab_spec_invalid_minute(self):
def test_crontab_spec_hour_formats(self):
c = self.crontab(hour=6)
- self.assertEqual(c.hour, set([6]))
+ self.assertEqual(c.hour, {6})
c = self.crontab(hour='5')
- self.assertEqual(c.hour, set([5]))
+ self.assertEqual(c.hour, {5})
c = self.crontab(hour=(4, 8, 12))
- self.assertEqual(c.hour, set([4, 8, 12]))
+ self.assertEqual(c.hour, {4, 8, 12})
def test_crontab_spec_invalid_hour(self):
with self.assertRaises(ValueError):
@@ -446,17 +444,17 @@ def test_crontab_spec_invalid_hour(self):
def test_crontab_spec_dow_formats(self):
c = self.crontab(day_of_week=5)
- self.assertEqual(c.day_of_week, set([5]))
+ self.assertEqual(c.day_of_week, {5})
c = self.crontab(day_of_week='5')
- self.assertEqual(c.day_of_week, set([5]))
+ self.assertEqual(c.day_of_week, {5})
c = self.crontab(day_of_week='fri')
- self.assertEqual(c.day_of_week, set([5]))
+ self.assertEqual(c.day_of_week, {5})
c = self.crontab(day_of_week='tuesday,sunday,fri')
- self.assertEqual(c.day_of_week, set([0, 2, 5]))
+ self.assertEqual(c.day_of_week, {0, 2, 5})
c = self.crontab(day_of_week='mon-fri')
- self.assertEqual(c.day_of_week, set([1, 2, 3, 4, 5]))
+ self.assertEqual(c.day_of_week, {1, 2, 3, 4, 5})
c = self.crontab(day_of_week='*/2')
- self.assertEqual(c.day_of_week, set([0, 2, 4, 6]))
+ self.assertEqual(c.day_of_week, {0, 2, 4, 6})
def test_crontab_spec_invalid_dow(self):
with self.assertRaises(ValueError):
@@ -470,13 +468,13 @@ def test_crontab_spec_invalid_dow(self):
def test_crontab_spec_dom_formats(self):
c = self.crontab(day_of_month=5)
- self.assertEqual(c.day_of_month, set([5]))
+ self.assertEqual(c.day_of_month, {5})
c = self.crontab(day_of_month='5')
- self.assertEqual(c.day_of_month, set([5]))
+ self.assertEqual(c.day_of_month, {5})
c = self.crontab(day_of_month='2,4,6')
- self.assertEqual(c.day_of_month, set([2, 4, 6]))
+ self.assertEqual(c.day_of_month, {2, 4, 6})
c = self.crontab(day_of_month='*/5')
- self.assertEqual(c.day_of_month, set([1, 6, 11, 16, 21, 26, 31]))
+ self.assertEqual(c.day_of_month, {1, 6, 11, 16, 21, 26, 31})
def test_crontab_spec_invalid_dom(self):
with self.assertRaises(ValueError):
@@ -490,15 +488,15 @@ def test_crontab_spec_invalid_dom(self):
def test_crontab_spec_moy_formats(self):
c = self.crontab(month_of_year=1)
- self.assertEqual(c.month_of_year, set([1]))
+ self.assertEqual(c.month_of_year, {1})
c = self.crontab(month_of_year='1')
- self.assertEqual(c.month_of_year, set([1]))
+ self.assertEqual(c.month_of_year, {1})
c = self.crontab(month_of_year='2,4,6')
- self.assertEqual(c.month_of_year, set([2, 4, 6]))
+ self.assertEqual(c.month_of_year, {2, 4, 6})
c = self.crontab(month_of_year='*/2')
- self.assertEqual(c.month_of_year, set([1, 3, 5, 7, 9, 11]))
+ self.assertEqual(c.month_of_year, {1, 3, 5, 7, 9, 11})
c = self.crontab(month_of_year='2-12/2')
- self.assertEqual(c.month_of_year, set([2, 4, 6, 8, 10, 12]))
+ self.assertEqual(c.month_of_year, {2, 4, 6, 8, 10, 12})
def test_crontab_spec_invalid_moy(self):
with self.assertRaises(ValueError):
diff --git a/celery/tests/concurrency/test_prefork.py b/celery/tests/concurrency/test_prefork.py
index 7ad247436..8216531ba 100644
--- a/celery/tests/concurrency/test_prefork.py
+++ b/celery/tests/concurrency/test_prefork.py
@@ -153,13 +153,13 @@ def test_select(self):
with patch('select.select') as select:
select.return_value = ([3], [], [])
self.assertEqual(
- asynpool._select(set([3])),
+ asynpool._select({3}),
([3], [], 0),
)
select.return_value = ([], [], [3])
self.assertEqual(
- asynpool._select(set([3]), None, set([3])),
+ asynpool._select({3}, None, {3}),
([3], [], 0),
)
@@ -167,13 +167,13 @@ def test_select(self):
eintr.errno = errno.EINTR
select.side_effect = eintr
- readers = set([3])
+ readers = {3}
self.assertEqual(asynpool._select(readers), ([], [], 1))
self.assertIn(3, readers)
with patch('select.select') as select:
select.side_effect = ebadf
- readers = set([3])
+ readers = {3}
self.assertEqual(asynpool._select(readers), ([], [], 1))
select.assert_has_calls([call([3], [], [], 0)])
self.assertNotIn(3, readers)
@@ -181,7 +181,7 @@ def test_select(self):
with patch('select.select') as select:
select.side_effect = MemoryError()
with self.assertRaises(MemoryError):
- asynpool._select(set([1]))
+ asynpool._select({1})
with patch('select.select') as select:
@@ -190,7 +190,7 @@ def se(*args):
raise ebadf
select.side_effect = se
with self.assertRaises(MemoryError):
- asynpool._select(set([3]))
+ asynpool._select({3})
with patch('select.select') as select:
@@ -200,14 +200,14 @@ def se2(*args):
raise ebadf
select.side_effect = se2
with self.assertRaises(socket.error):
- asynpool._select(set([3]))
+ asynpool._select({3})
with patch('select.select') as select:
select.side_effect = socket.error()
select.side_effect.errno = 34134
with self.assertRaises(socket.error):
- asynpool._select(set([3]))
+ asynpool._select({3})
def test_promise(self):
fun = Mock()
@@ -309,7 +309,7 @@ def test_restart(self):
raise SkipTest('functional test')
def get_pids(pool):
- return set([p.pid for p in pool._pool._pool])
+ return {p.pid for p in pool._pool._pool}
tp = self.TaskPool(5)
time.sleep(0.5)
diff --git a/celery/tests/utils/test_datastructures.py b/celery/tests/utils/test_datastructures.py
index f26fe86f7..e9ee0f7d8 100644
--- a/celery/tests/utils/test_datastructures.py
+++ b/celery/tests/utils/test_datastructures.py
@@ -220,7 +220,7 @@ def test_purge(self):
s.purge()
hp.assert_called_with(s._heap)
with patch('celery.datastructures.heappop') as hp:
- s._data = dict((i * 2, i * 2) for i in range(10))
+ s._data = {i * 2: i * 2 for i in range(10)}
s.purge()
self.assertEqual(hp.call_count, 10)
diff --git a/celery/tests/worker/test_control.py b/celery/tests/worker/test_control.py
index e613440ff..bb7df0daf 100644
--- a/celery/tests/worker/test_control.py
+++ b/celery/tests/worker/test_control.py
@@ -141,7 +141,7 @@ def test_enable_events(self):
evd.groups = set()
panel.handle('enable_events')
self.assertIn('task', evd.groups)
- evd.groups = set(['task'])
+ evd.groups = {'task'}
self.assertIn('already enabled', panel.handle('enable_events')['ok'])
def test_disable_events(self):
@@ -149,7 +149,7 @@ def test_disable_events(self):
panel = self.create_panel(consumer=consumer)
evd = consumer.event_dispatcher
evd.enabled = True
- evd.groups = set(['task'])
+ evd.groups = {'task'}
panel.handle('disable_events')
self.assertNotIn('task', evd.groups)
self.assertIn('already disabled', panel.handle('disable_events')['ok'])
diff --git a/celery/utils/__init__.py b/celery/utils/__init__.py
index 24205090b..4045a85f8 100644
--- a/celery/utils/__init__.py
+++ b/celery/utils/__init__.py
@@ -176,8 +176,8 @@ def lpmerge(L, R):
"""In place left precedent dictionary merge.
Keeps values from `L`, if the value in `R` is :const:`None`."""
- set = L.__setitem__
- [set(k, v) for k, v in items(R) if v is not None]
+ setitem = L.__setitem__
+ [setitem(k, v) for k, v in items(R) if v is not None]
return L
@@ -214,7 +214,7 @@ def cry(out=None, sepchr='=', seplen=49): # pragma: no cover
# get a map of threads by their ID so we can print their names
# during the traceback dump
- tmap = dict((t.ident, t) for t in threading.enumerate())
+ tmap = {t.ident: t for t in threading.enumerate()}
sep = sepchr * seplen
for tid, frame in items(sys._current_frames()):
@@ -276,9 +276,10 @@ def jsonify(obj,
elif isinstance(obj, (tuple, list)):
return [_jsonify(v) for v in obj]
elif isinstance(obj, dict):
- return dict((k, _jsonify(v, key=k))
- for k, v in items(obj)
- if (keyfilter(k) if keyfilter else 1))
+ return {
+ k: _jsonify(v, key=k) for k, v in items(obj)
+ if (keyfilter(k) if keyfilter else 1)
+ }
elif isinstance(obj, datetime.datetime):
# See "Date Time String Format" in the ECMA-262 specification.
r = obj.isoformat()
diff --git a/celery/utils/functional.py b/celery/utils/functional.py
index faa272b32..8903ff08d 100644
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -265,8 +265,7 @@ def padlist(container, size, default=None):
def mattrgetter(*attrs):
"""Like :func:`operator.itemgetter` but return :const:`None` on missing
attributes instead of raising :exc:`AttributeError`."""
- return lambda obj: dict((attr, getattr(obj, attr, None))
- for attr in attrs)
+ return lambda obj: {attr: getattr(obj, attr, None) for attr in attrs}
def uniq(it):
@@ -303,4 +302,4 @@ def data(self):
def dictfilter(d=None, **kw):
"""Remove all keys from dict ``d`` whose value is :const:`None`"""
d = kw if d is None else (dict(d, **kw) if kw else d)
- return dict((k, v) for k, v in items(d) if v is not None)
+ return {k: v for k, v in items(d) if v is not None}
diff --git a/celery/worker/__init__.py b/celery/worker/__init__.py
index 217902d2e..29a095939 100644
--- a/celery/worker/__init__.py
+++ b/celery/worker/__init__.py
@@ -76,7 +76,7 @@ class WorkController(object):
class Blueprint(bootsteps.Blueprint):
"""Worker bootstep blueprint."""
name = 'Worker'
- default_steps = set([
+ default_steps = {
'celery.worker.components:Hub',
'celery.worker.components:Queues',
'celery.worker.components:Pool',
@@ -86,8 +86,7 @@ class Blueprint(bootsteps.Blueprint):
'celery.worker.components:Consumer',
'celery.worker.autoscale:WorkerComponent',
'celery.worker.autoreload:WorkerComponent',
-
- ])
+ }
def __init__(self, app=None, hostname=None, **kwargs):
self.app = app or self.app
@@ -190,8 +189,8 @@ def setup_includes(self, includes):
prev += tuple(includes)
[self.app.loader.import_task_module(m) for m in includes]
self.include = includes
- task_modules = set(task.__class__.__module__
- for task in values(self.app.tasks))
+ task_modules = {task.__class__.__module__
+ for task in values(self.app.tasks)}
self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules)
def prepare_args(self, **kwargs):
diff --git a/celery/worker/autoreload.py b/celery/worker/autoreload.py
index 8ade32fb2..03dcc8efd 100644
--- a/celery/worker/autoreload.py
+++ b/celery/worker/autoreload.py
@@ -107,8 +107,8 @@ def register_with_event_loop(self, hub):
def find_changes(self):
maybe_modified = self._maybe_modified
- modified = dict((f, mt) for f, mt in self._mtimes()
- if maybe_modified(f, mt))
+ modified = {f: mt for f, mt in self._mtimes()
+ if maybe_modified(f, mt)}
if modified:
self.on_change(modified)
self.modify_times.update(modified)
@@ -131,7 +131,7 @@ class KQueueMonitor(BaseMonitor):
def __init__(self, *args, **kwargs):
super(KQueueMonitor, self).__init__(*args, **kwargs)
- self.filemap = dict((f, None) for f in self.files)
+ self.filemap = {f: None for f in self.files}
self.fdmap = {}
def register_with_event_loop(self, hub):
@@ -257,13 +257,14 @@ def __init__(self, controller, modules=None, monitor_cls=None, **options):
def on_init(self):
files = self.file_to_module
- files.update(dict(
- (module_file(sys.modules[m]), m) for m in self.modules))
+ files.update({
+ module_file(sys.modules[m]): m for m in self.modules
+ })
self._monitor = self.Monitor(
files, self.on_change,
shutdown_event=self._is_shutdown, **self.options)
- self._hashes = dict([(f, file_hash(f)) for f in files])
+ self._hashes = {f: file_hash(f) for f in files}
def register_with_event_loop(self, hub):
if self._monitor is None:
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 16fa0ff4e..16f0b2ff5 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -550,7 +550,7 @@ def stop(self, c):
class Mingle(bootsteps.StartStopStep):
label = 'Mingle'
requires = (Events, )
- compatible_transports = set(['amqp', 'redis'])
+ compatible_transports = {'amqp', 'redis'}
def __init__(self, c, without_mingle=False, **kwargs):
self.enabled = not without_mingle and self.compatible_transport(c.app)
@@ -643,7 +643,7 @@ class Gossip(bootsteps.ConsumerStep):
_cons_stamp_fields = itemgetter(
'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver',
)
- compatible_transports = set(['amqp', 'redis'])
+ compatible_transports = {'amqp', 'redis'}
def __init__(self, c, without_gossip=False, interval=5.0, **kwargs):
self.enabled = not without_gossip and self.compatible_transport(c.app)
diff --git a/celery/worker/control.py b/celery/worker/control.py
index fcaf04081..8de8ac838 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -56,15 +56,14 @@ def query_task(state, ids, **kwargs):
def reqinfo(state, req):
return state, req.info()
- reqs = dict((req.id, ('reserved', req.info()))
- for req in _find_requests_by_id(
- ids, worker_state.reserved_requests))
- reqs.update(dict(
- (req.id, ('active', req.info()))
- for req in _find_requests_by_id(
- ids, worker_state.active_requests,
- )
- ))
+ reqs = {
+ req.id: ('reserved', req.info())
+ for req in _find_requests_by_id(ids, worker_state.reserved_requests)
+ }
+ reqs.update({
+ req.id: ('active', req.info())
+ for req in _find_requests_by_id(ids, worker_state.active_requests)
+ })
return reqs
@@ -280,9 +279,10 @@ def dump_tasks(state, taskinfoitems=None, **kwargs):
taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS
def _extract_info(task):
- fields = dict((field, str(getattr(task, field, None)))
- for field in taskinfoitems
- if getattr(task, field, None) is not None)
+ fields = {
+ field: str(getattr(task, field, None)) for field in taskinfoitems
+ if getattr(task, field, None) is not None
+ }
if fields:
info = ['='.join(f) for f in items(fields)]
return '{0} [{1}]'.format(task.name, ' '.join(info))
diff --git a/celery/worker/job.py b/celery/worker/job.py
index 72946d0d3..8522d0091 100644
--- a/celery/worker/job.py
+++ b/celery/worker/job.py
@@ -221,8 +221,8 @@ def extend_with_default_kwargs(self):
'delivery_info': self.delivery_info}
fun = self.task.run
supported_keys = fun_takes_kwargs(fun, default_kwargs)
- extend_with = dict((key, val) for key, val in items(default_kwargs)
- if key in supported_keys)
+ extend_with = {key: val for key, val in items(default_kwargs)
+ if key in supported_keys}
kwargs.update(extend_with)
return kwargs
From e80c545f6a0fce43676bcc18b3262909ce2023d7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 16:19:52 +0000
Subject: [PATCH 0033/1103] Removes Py2.6 workaround for missing WeakSet
---
celery/_state.py | 27 +--------------------------
1 file changed, 1 insertion(+), 26 deletions(-)
diff --git a/celery/_state.py b/celery/_state.py
index 3af39bf91..80a3b112d 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -19,31 +19,6 @@
from celery.local import Proxy
from celery.utils.threads import LocalStack
-try:
- from weakref import WeakSet as AppSet
-except ImportError: # XXX Py2.6
-
- class AppSet(object): # noqa
-
- def __init__(self):
- self._refs = set()
-
- def add(self, app):
- self._refs.add(weakref.ref(app))
-
- def __iter__(self):
- dirty = []
- try:
- for appref in self._refs:
- app = appref()
- if app is None:
- dirty.append(appref)
- else:
- yield app
- finally:
- while dirty:
- self._refs.discard(dirty.pop())
-
__all__ = ['set_default_app', 'get_current_app', 'get_current_task',
'get_current_worker_task', 'current_app', 'current_task']
@@ -51,7 +26,7 @@ def __iter__(self):
default_app = None
#: List of all app instances (weakrefs), must not be used directly.
-_apps = AppSet()
+_apps = weakref.WeakSet()
_task_join_will_block = False
From 524421a36dcc838a5f51e5cf122902aab774bad1 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 16:21:09 +0000
Subject: [PATCH 0034/1103] [3.2] Requires Py2.7+
---
.travis.yml | 1 -
setup.py | 5 ++---
tox.ini | 10 ----------
3 files changed, 2 insertions(+), 14 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index c8341f045..3690f624f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,7 +4,6 @@ env:
global:
PYTHONUNBUFFERED=yes
matrix:
- - TOXENV=2.6
- TOXENV=2.7
- TOXENV=3.3
- TOXENV=3.4
diff --git a/setup.py b/setup.py
index 24ed03769..2767346f0 100644
--- a/setup.py
+++ b/setup.py
@@ -19,8 +19,8 @@
CELERY_COMPAT_PROGRAMS = int(os.environ.get('CELERY_COMPAT_PROGRAMS', 1))
-if sys.version_info < (2, 6):
- raise Exception('Celery 3.1 requires Python 2.6 or higher.')
+if sys.version_info < (2, 7):
+ raise Exception('Celery 3.2 requires Python 2.7 or higher.')
downgrade_packages = [
'celery.app.task',
@@ -67,7 +67,6 @@
Topic :: Software Development :: Object Brokering
Programming Language :: Python
Programming Language :: Python :: 2
- Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
diff --git a/tox.ini b/tox.ini
index d8605c74d..bde53e19a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,5 @@
[tox]
envlist =
- 2.6,
2.7,
3.3,
3.4,
@@ -37,15 +36,6 @@ setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
-[testenv:2.6]
-basepython = python2.6
-deps = -r{toxinidir}/requirements/default.txt
- -r{toxinidir}/requirements/test.txt
- -r{toxinidir}/requirements/test-ci.txt
-setenv = C_DEBUG_TEST = 1
-commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
- nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
-
[testenv:pypy]
basepython = pypy
deps = -r{toxinidir}/requirements/default.txt
From 663e4d3a0b457e02e0a92d5a751d4046da96c286 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 17:06:04 +0000
Subject: [PATCH 0035/1103] create_task_message for task protocol v2
---
celery/app/amqp.py | 92 ++++++++++++++++++++++++++++++++++++------
celery/app/defaults.py | 1 +
2 files changed, 81 insertions(+), 12 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 995171e6e..60221a454 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -213,6 +213,14 @@ class AMQP(object):
def __init__(self, app):
self.app = app
+ self.task_protocols = {
+ 1: self.as_task_v1,
+ 2: self.as_task_v2,
+ }
+
+ @cached_property
+ def create_task_message(self):
+ return self.task_protocols[self.app.conf.CELERY_TASK_PROTOCOL]
@cached_property
def _task_retry(self):
@@ -303,12 +311,70 @@ def default_exchange(self):
return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
- def create_task_message(self, task_id, name, args=None, kwargs=None,
- countdown=None, eta=None, group_id=None,
- expires=None, now=None, retries=0, chord=None,
- callbacks=None, errbacks=None, reply_to=None,
- time_limit=None, soft_time_limit=None,
- create_sent_event=False):
+ def as_task_v2(self, task_id, name, args=None, kwargs=None,
+ countdown=None, eta=None, group_id=None,
+ expires=None, now=None, retries=0, chord=None,
+ callbacks=None, errbacks=None, reply_to=None,
+ time_limit=None, soft_time_limit=None,
+ create_sent_event=False, timezone=None):
+ args = args or ()
+ kwargs = kwargs or {}
+ utc = self.utc
+ if not isinstance(args, (list, tuple)):
+ raise ValueError('task args must be a list or tuple')
+ if not isinstance(kwargs, Mapping):
+ raise ValueError('task keyword arguments must be a mapping')
+ if countdown: # convert countdown to ETA
+ now = now or self.app.now()
+ timezone = timezone or self.app.timezone
+ eta = now + timedelta(seconds=countdown)
+ if utc:
+ eta = to_utc(eta).astimezone(timezone)
+ if isinstance(expires, numbers.Real):
+ now = now or self.app.now()
+ timezone = timezone or self.app.timezone
+ expires = now + timedelta(seconds=expires)
+ if utc:
+ expires = to_utc(expires).astimezone(timezone)
+ eta = eta and eta.isoformat()
+ expires = expires and expires.isoformat()
+
+ return task_message(
+ headers={
+ 'lang': 'py',
+ 'c_type': name,
+ 'eta': eta,
+ 'expires': expires,
+ 'callbacks': callbacks,
+ 'errbacks': errbacks,
+ 'chain': None, # TODO
+ 'group': group_id,
+ 'chord': chord,
+ 'retries': retries,
+ 'timelimit': (time_limit, soft_time_limit),
+ },
+ properties={
+ 'correlation_id': task_id,
+ 'reply_to': reply_to,
+ },
+ body=(args, kwargs),
+ sent_event={
+ 'uuid': task_id,
+ 'name': name,
+ 'args': safe_repr(args),
+ 'kwargs': safe_repr(kwargs),
+ 'retries': retries,
+ 'eta': eta,
+ 'expires': expires,
+ } if create_sent_event else None,
+ )
+
+ def as_task_v1(self, task_id, name, args=None, kwargs=None,
+ countdown=None, eta=None, group_id=None,
+ expires=None, now=None, timezone=None, retries=0,
+ chord=None, callbacks=None, errbacks=None, reply_to=None,
+ time_limit=None, soft_time_limit=None,
+ create_sent_event=False):
args = args or ()
kwargs = kwargs or {}
utc = self.utc
@@ -318,24 +384,26 @@ def create_task_message(self, task_id, name, args=None, kwargs=None,
raise ValueError('task keyword arguments must be a mapping')
if countdown: # convert countdown to ETA
now = now or self.app.now()
+ timezone = timezone or self.app.timezone
eta = now + timedelta(seconds=countdown)
if utc:
- eta = to_utc(eta).astimezone(self.app.timezone)
+ eta = to_utc(eta).astimezone(timezone)
if isinstance(expires, numbers.Real):
now = now or self.app.now()
+ timezone = timezone or self.app.timezone
expires = now + timedelta(seconds=expires)
if utc:
- expires = to_utc(expires).astimezone(self.app.timezone)
+ expires = to_utc(expires).astimezone(timezone)
eta = eta and eta.isoformat()
expires = expires and expires.isoformat()
return task_message(
- {},
- {
+ headers={},
+ properties={
'correlation_id': task_id,
'reply_to': reply_to,
},
- {
+ body={
'task': name,
'id': task_id,
'args': args,
@@ -350,7 +418,7 @@ def create_task_message(self, task_id, name, args=None, kwargs=None,
'taskset': group_id,
'chord': chord,
},
- {
+ sent_event={
'uuid': task_id,
'name': name,
'args': safe_repr(args),
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index a9cc79914..b09cc0256 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -146,6 +146,7 @@ def __repr__(self):
'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
+ 'TASK_PROTOCOL': Option(1, type='int'),
'TASK_PUBLISH_RETRY': Option(True, type='bool'),
'TASK_PUBLISH_RETRY_POLICY': Option({
'max_retries': 3,
From 0990a17b9ddb6ad37ed71dd8cffb559674110691 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 17:43:19 +0000
Subject: [PATCH 0036/1103] Fixes amqp publish
---
celery/app/amqp.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 60221a454..a535ac7bb 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -355,7 +355,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
},
properties={
'correlation_id': task_id,
- 'reply_to': reply_to,
+ 'reply_to': reply_to or '',
},
body=(args, kwargs),
sent_event={
@@ -401,7 +401,7 @@ def as_task_v1(self, task_id, name, args=None, kwargs=None,
headers={},
properties={
'correlation_id': task_id,
- 'reply_to': reply_to,
+ 'reply_to': reply_to or '',
},
body={
'task': name,
From 79fd299a164b9b1b22c5bafa4f23c325c474cb56 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 27 Mar 2014 17:47:59 +0000
Subject: [PATCH 0037/1103] Fixes amqp publish #2
---
celery/app/amqp.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index a535ac7bb..299775e2d 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -313,10 +313,10 @@ def default_exchange(self):
def as_task_v2(self, task_id, name, args=None, kwargs=None,
countdown=None, eta=None, group_id=None,
- expires=None, now=None, retries=0, chord=None,
+ expires=None, retries=0, chord=None,
callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
- create_sent_event=False, timezone=None):
+ create_sent_event=False, now=None, timezone=None):
args = args or ()
kwargs = kwargs or {}
utc = self.utc
@@ -371,10 +371,10 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
def as_task_v1(self, task_id, name, args=None, kwargs=None,
countdown=None, eta=None, group_id=None,
- expires=None, now=None, timezone=None, retries=0,
+ expires=None, retries=0,
chord=None, callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
- create_sent_event=False):
+ create_sent_event=False, now=None, timezone=None):
args = args or ()
kwargs = kwargs or {}
utc = self.utc
From 5d88b18ea52e9c00b82102b189ff9524af27e450 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 28 Mar 2014 13:22:17 +0000
Subject: [PATCH 0038/1103] Reorganizes AMQP class
---
celery/app/amqp.py | 96 ++++++++++++++++++++--------------------------
1 file changed, 42 insertions(+), 54 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 299775e2d..6db1d7315 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -223,19 +223,8 @@ def create_task_message(self):
return self.task_protocols[self.app.conf.CELERY_TASK_PROTOCOL]
@cached_property
- def _task_retry(self):
- return self.app.conf.CELERY_TASK_PUBLISH_RETRY
-
- @cached_property
- def _task_retry_policy(self):
- return self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
-
- @cached_property
- def _task_sent_event(self):
- return self.app.conf.CELERY_SEND_TASK_SENT_EVENT
-
- def flush_routes(self):
- self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
+ def send_task_message(self):
+ return self._create_task_sender()
def Queues(self, queues, create_missing=None, ha_policy=None,
autoexchange=None):
@@ -263,6 +252,9 @@ def Router(self, queues=None, create_missing=None):
self.app.either('CELERY_CREATE_MISSING_QUEUES',
create_missing), app=self.app)
+ def flush_routes(self):
+ self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES)
+
def TaskConsumer(self, channel, queues=None, accept=None, **kw):
if accept is None:
accept = self.app.conf.CELERY_ACCEPT_CONTENT
@@ -272,45 +264,6 @@ def TaskConsumer(self, channel, queues=None, accept=None, **kw):
**kw
)
- @cached_property
- def default_queue(self):
- return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]
-
- @cached_property
- def queues(self):
- """Queue name⇒ declaration mapping."""
- return self.Queues(self.app.conf.CELERY_QUEUES)
-
- @queues.setter # noqa
- def queues(self, queues):
- return self.Queues(queues)
-
- @property
- def routes(self):
- if self._rtable is None:
- self.flush_routes()
- return self._rtable
-
- @cached_property
- def router(self):
- return self.Router()
-
- @property
- def producer_pool(self):
- if self._producer_pool is None:
- self._producer_pool = ProducerPool(
- self.app.pool,
- limit=self.app.pool.limit,
- Producer=self.Producer,
- )
- return self._producer_pool
- publisher_pool = producer_pool # compat alias
-
- @cached_property
- def default_exchange(self):
- return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
- self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
-
def as_task_v2(self, task_id, name, args=None, kwargs=None,
countdown=None, eta=None, group_id=None,
expires=None, retries=0, chord=None,
@@ -519,8 +472,43 @@ def publish_task(producer, name, message,
return publish_task
@cached_property
- def send_task_message(self):
- return self._create_task_sender()
+ def default_queue(self):
+ return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE]
+
+ @cached_property
+ def queues(self):
+ """Queue name⇒ declaration mapping."""
+ return self.Queues(self.app.conf.CELERY_QUEUES)
+
+ @queues.setter # noqa
+ def queues(self, queues):
+ return self.Queues(queues)
+
+ @property
+ def routes(self):
+ if self._rtable is None:
+ self.flush_routes()
+ return self._rtable
+
+ @cached_property
+ def router(self):
+ return self.Router()
+
+ @property
+ def producer_pool(self):
+ if self._producer_pool is None:
+ self._producer_pool = ProducerPool(
+ self.app.pool,
+ limit=self.app.pool.limit,
+ Producer=self.Producer,
+ )
+ return self._producer_pool
+ publisher_pool = producer_pool # compat alias
+
+ @cached_property
+ def default_exchange(self):
+ return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE,
+ self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE)
@cached_property
def utc(self):
From 5d40a9dfd19d9994afe657bc62f278aeb20d1eea Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 28 Mar 2014 13:49:24 +0000
Subject: [PATCH 0039/1103] ColorFormatter should not modify record.msg.
Closes #1939
---
celery/tests/app/test_log.py | 24 ++++++++++++++----------
celery/utils/log.py | 28 +++++++++++++++++-----------
2 files changed, 31 insertions(+), 21 deletions(-)
diff --git a/celery/tests/app/test_log.py b/celery/tests/app/test_log.py
index f430d8b5b..588e39bee 100644
--- a/celery/tests/app/test_log.py
+++ b/celery/tests/app/test_log.py
@@ -97,7 +97,7 @@ class test_ColorFormatter(AppCase):
@patch('celery.utils.log.safe_str')
@patch('logging.Formatter.formatException')
def test_formatException_not_string(self, fe, safe_str):
- x = ColorFormatter('HELLO')
+ x = ColorFormatter()
value = KeyError()
fe.return_value = value
self.assertIs(x.formatException(value), value)
@@ -106,16 +106,19 @@ def test_formatException_not_string(self, fe, safe_str):
@patch('logging.Formatter.formatException')
@patch('celery.utils.log.safe_str')
- def test_formatException_string(self, safe_str, fe, value='HELLO'):
- x = ColorFormatter(value)
- fe.return_value = value
- self.assertTrue(x.formatException(value))
+ def test_formatException_string(self, safe_str, fe):
+ x = ColorFormatter()
+ fe.return_value = 'HELLO'
+ try:
+ raise Exception()
+ except Exception:
+ self.assertTrue(x.formatException(sys.exc_info()))
if sys.version_info[0] == 2:
self.assertTrue(safe_str.called)
@patch('logging.Formatter.format')
def test_format_object(self, _format):
- x = ColorFormatter(object())
+ x = ColorFormatter()
x.use_color = True
record = Mock()
record.levelname = 'ERROR'
@@ -124,7 +127,7 @@ def test_format_object(self, _format):
@patch('celery.utils.log.safe_str')
def test_format_raises(self, safe_str):
- x = ColorFormatter('HELLO')
+ x = ColorFormatter()
def on_safe_str(s):
try:
@@ -136,6 +139,7 @@ def on_safe_str(s):
class Record(object):
levelname = 'ERROR'
msg = 'HELLO'
+ exc_info = 1
exc_text = 'error text'
stack_info = None
@@ -148,15 +152,15 @@ def getMessage(self):
record = Record()
safe_str.return_value = record
- x.format(record)
- self.assertIn(''.format(
+ type(msg), exc
+ ),
+ )
+ try:
+ return logging.Formatter.format(self, record)
+ finally:
+ record.msg, record.exc_info = prev_msg, einfo
else:
- return safe_str(sformat(self, record))
+ return safe_str(msg)
class LoggingProxy(object):
From 0ffe314b251628bac2e82d611778998ef8057bfe Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 28 Mar 2014 19:53:11 +0000
Subject: [PATCH 0040/1103] Tests: Make sure all threads get the current app
trap
---
celery/_state.py | 5 +++++
celery/app/base.py | 4 ++--
celery/tests/backends/test_mongodb.py | 4 ++--
celery/tests/case.py | 17 ++++++++++-------
4 files changed, 19 insertions(+), 11 deletions(-)
diff --git a/celery/_state.py b/celery/_state.py
index 80a3b112d..e599a0fb9 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -67,6 +67,11 @@ def _get_current_app():
))
return _tls.current_app or default_app
+
+def _set_current_app(app):
+ _tls.current_app = app
+
+
C_STRICT_APP = os.environ.get('C_STRICT_APP')
if os.environ.get('C_STRICT_APP'): # pragma: no cover
def get_current_app():
diff --git a/celery/app/base.py b/celery/app/base.py
index c934a7a94..914b3204a 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -26,7 +26,7 @@
from celery import platforms
from celery import signals
from celery._state import (
- _task_stack, _tls, get_current_app, set_default_app,
+ _task_stack, get_current_app, _set_current_app, set_default_app,
_register_app, get_current_worker_task,
)
from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
@@ -147,7 +147,7 @@ def __init__(self, main=None, loader=None, backend=None,
_register_app(self)
def set_current(self):
- _tls.current_app = self
+ _set_current_app(self)
def set_default(self):
set_default_app(self)
diff --git a/celery/tests/backends/test_mongodb.py b/celery/tests/backends/test_mongodb.py
index e260d87f0..f7546d31e 100644
--- a/celery/tests/backends/test_mongodb.py
+++ b/celery/tests/backends/test_mongodb.py
@@ -298,7 +298,7 @@ def test_cleanup(self, mock_get_database):
self.backend.taskmeta_collection = MONGODB_COLLECTION
mock_database = MagicMock(spec=['__getitem__', '__setitem__'])
- mock_collection = Mock()
+ self.backend.collections = mock_collection = Mock()
mock_get_database.return_value = mock_database
mock_database.__getitem__.return_value = mock_collection
@@ -309,7 +309,7 @@ def test_cleanup(self, mock_get_database):
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(
MONGODB_COLLECTION)
- mock_collection.assert_called_once_with()
+ self.assertTrue(mock_collection.remove.called)
def test_get_database_authfailure(self):
x = MongoBackend(app=self.app)
diff --git a/celery/tests/case.py b/celery/tests/case.py
index c96fd8ec0..808347817 100644
--- a/celery/tests/case.py
+++ b/celery/tests/case.py
@@ -412,8 +412,12 @@ def setUp(self):
self._current_app = current_app()
self._default_app = _state.default_app
trap = Trap()
+ self._prev_tls = _state._tls
_state.set_default_app(trap)
- _state._tls.current_app = trap
+
+ class NonTLS(object):
+ current_app = trap
+ _state._tls = NonTLS()
self.app = self.Celery(set_as_current=False)
if not self.contained:
@@ -447,13 +451,12 @@ def _teardown_app(self):
if isinstance(backend.client, DummyClient):
backend.client.cache.clear()
backend._cache.clear()
- from celery._state import (
- _tls, set_default_app, _set_task_join_will_block,
- )
- _set_task_join_will_block(False)
+ from celery import _state
+ _state._set_task_join_will_block(False)
- set_default_app(self._default_app)
- _tls.current_app = self._current_app
+ _state.set_default_app(self._default_app)
+ _state._tls = self._prev_tls
+ _state._tls.current_app = self._current_app
if self.app is not self._current_app:
self.app.close()
self.app = None
From bb5194295e0f087d6465ecb114fec72d3f0be52d Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 31 Mar 2014 15:29:15 +0100
Subject: [PATCH 0041/1103] Result: .forget() should also clear local cache
---
celery/result.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/result.py b/celery/result.py
index 901d01933..706a8ec4a 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -96,6 +96,7 @@ def as_tuple(self):
def forget(self):
"""Forget about (and possibly remove the result of) this task."""
+ self._cache = None
self.backend.forget(self.id)
def revoke(self, connection=None, terminate=False, signal=None,
From d70a4498fb9b988121dab5ea54a04100fbb24071 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 1 Apr 2014 16:43:38 +0100
Subject: [PATCH 0042/1103] Fixes test weirdness
---
celery/tests/tasks/test_tasks.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/celery/tests/tasks/test_tasks.py b/celery/tests/tasks/test_tasks.py
index c01ffc16a..087cd88bc 100644
--- a/celery/tests/tasks/test_tasks.py
+++ b/celery/tests/tasks/test_tasks.py
@@ -128,6 +128,8 @@ def test_retry_kwargs_can_be_empty(self):
self.retry_task_mockapply.push_request()
try:
with self.assertRaises(Retry):
+ import sys
+ sys.exc_clear()
self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
finally:
self.retry_task_mockapply.pop_request()
From b04cfd5eae44049f081356b651ab9908812b7c78 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 1 Apr 2014 17:00:01 +0100
Subject: [PATCH 0043/1103] Implements ResultSet.backend (Issue #1936)
---
celery/result.py | 6 +++++-
celery/tests/tasks/test_result.py | 2 ++
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/celery/result.py b/celery/result.py
index 706a8ec4a..eb7364a84 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -658,7 +658,7 @@ def iter_native(self, timeout=None, interval=0.5, no_ack=True):
results = self.results
if not results:
return iter([])
- return results[0].backend.get_many(
+ return self.backend.get_many(
set(r.id for r in results),
timeout=timeout, interval=interval, no_ack=no_ack,
)
@@ -720,6 +720,10 @@ def subtasks(self):
def supports_native_join(self):
return self.results[0].supports_native_join
+ @property
+ def backend(self):
+ return self.app.backend if self.app else self.results[0].backend
+
class GroupResult(ResultSet):
"""Like :class:`ResultSet`, but with an associated id.
diff --git a/celery/tests/tasks/test_result.py b/celery/tests/tasks/test_result.py
index dbaf3f4d4..ee3c9bb1a 100644
--- a/celery/tests/tasks/test_result.py
+++ b/celery/tests/tasks/test_result.py
@@ -489,6 +489,7 @@ def test_join_native(self):
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
+ ts.app.backend = backend
backend.ids = [subtask.id for subtask in subtasks]
res = ts.join_native()
self.assertEqual(res, list(range(10)))
@@ -526,6 +527,7 @@ def test_iter_native(self):
subtasks = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), subtasks)
+ ts.app.backend = backend
backend.ids = [subtask.id for subtask in subtasks]
self.assertEqual(len(list(ts.iter_native())), 10)
From e07ea51f3a41d912c2fa3731552011556c1abd05 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 1 Apr 2014 17:01:09 +0100
Subject: [PATCH 0044/1103] Task callbacks applied as group means trail
(.children) stored multiple times. Closes #1936. Closes #1943
---
celery/app/builtins.py | 5 +++--
celery/app/trace.py | 26 +++++++++++++++++++++-----
celery/canvas.py | 6 +++---
3 files changed, 27 insertions(+), 10 deletions(-)
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index a60920840..9240537cf 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -171,7 +171,8 @@ class Group(app.Task):
accept_magic_kwargs = False
_decorated = True
- def run(self, tasks, result, group_id, partial_args):
+ def run(self, tasks, result, group_id, partial_args,
+ add_to_parent=True):
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
@@ -186,7 +187,7 @@ def run(self, tasks, result, group_id, partial_args):
[stask.apply_async(group_id=group_id, producer=pub,
add_to_parent=False) for stask in taskit]
parent = get_current_worker_task()
- if parent:
+ if add_to_parent and parent:
parent.add_trail(result)
return result
diff --git a/celery/app/trace.py b/celery/app/trace.py
index b4c271631..45e24c170 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -257,11 +257,27 @@ def trace_task(uuid, args, kwargs, request=None):
try:
# callback tasks must be applied before the result is
# stored, so that result.children is populated.
- group(
- [signature(callback, app=app)
- for callback in task.request.callbacks or []],
- app=app,
- ).apply_async((retval, ))
+
+ # groups are called inline and will store trail
+ # separately, so need to call them separately
+ # so that the trail's not added multiple times :(
+ # (Issue #1936)
+ callbacks = task.request.callbacks
+ if callbacks:
+ if len(task.request.callbacks) > 1:
+ sigs, groups = [], []
+ for sig in callbacks:
+ sig = signature(sig, app=app)
+ if isinstance(sig, group):
+ groups.append(sig)
+ else:
+ sigs.append(sig)
+ for group_ in groups:
+ group_.apply_async((retval, ))
+ if sigs:
+ group(sigs).apply_async((retval, ))
+ else:
+ signature(callbacks[0], app=app).delay(retval)
if publish_result:
store_result(
uuid, retval, SUCCESS, request=task_request,
diff --git a/celery/canvas.py b/celery/canvas.py
index cabc5070c..5efb75b09 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -477,13 +477,13 @@ def from_dict(self, d, app=None):
task['args'] = task._merge(d['args'])[0]
return group(tasks, app=app, **kwdict(d['options']))
- def apply_async(self, args=(), kwargs=None, **options):
+ def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options):
tasks = _maybe_clone(self.tasks, app=self._app)
if not tasks:
return self.freeze()
type = self.type
- return type(*type.prepare(dict(self.options, **options),
- tasks, args))
+ return type(*type.prepare(dict(self.options, **options), tasks, args),
+ add_to_parent=add_to_parent)
def set_immutable(self, immutable):
for task in self.tasks:
From ec905fadc41f541c92098c037dbd94c7dac5bb6a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 1 Apr 2014 19:53:57 +0100
Subject: [PATCH 0045/1103] Multi: %n is now the same as %N to be consistent
with "celery worker".
Closes #1938
---
celery/bin/multi.py | 58 +++++++++++++++-------------------
celery/utils/__init__.py | 7 ++--
docs/tutorials/daemonizing.rst | 22 ++++++-------
3 files changed, 40 insertions(+), 47 deletions(-)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 23ff496f7..9e6dacdf7 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -13,19 +13,19 @@
# Pidfiles and logfiles are stored in the current directory
# by default. Use --pidfile and --logfile argument to change
- # this. The abbreviation %N will be expanded to the current
+ # this. The abbreviation %n will be expanded to the current
# node name.
- $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid
- --logfile=/var/log/celery/%N.log
+ $ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid
+ --logfile=/var/log/celery/%n.log
# You need to add the same arguments when you restart,
# as these are not persisted anywhere.
- $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid
- --logfile=/var/run/celery/%N.log
+ $ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
+ --logfile=/var/run/celery/%n.log
# To stop the node, you need to specify the same pidfile.
- $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid
+ $ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid
# 3 workers, with 3 processes each
$ celery multi start 3 -c 3
@@ -101,6 +101,7 @@
import sys
from collections import defaultdict, namedtuple
+from functools import partial
from subprocess import Popen
from time import sleep
@@ -111,7 +112,8 @@
from celery import VERSION_BANNER
from celery.five import items
from celery.platforms import Pidfile, IS_WINDOWS
-from celery.utils import term, nodesplit
+from celery.utils import term
+from celery.utils import host_format, node_format, nodesplit
from celery.utils.text import pluralize
__all__ = ['MultiTool']
@@ -247,8 +249,8 @@ def start(self, argv, cmd):
self.retcode = int(any(retcodes))
def with_detacher_default_options(self, p):
- _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
- _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log')
+ _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
+ _setdefaultopt(p.options, ['--logfile', '-f'], '%n.log')
p.options.setdefault(
'--cmd',
'-m {0}'.format(celery_exe('worker', '--detach')),
@@ -320,7 +322,7 @@ def note_waiting():
self.note('')
def getpids(self, p, cmd, callback=None):
- _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid')
+ _setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
nodes = []
for node in multi_args(p, cmd):
@@ -491,25 +493,27 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
raise KeyError('No node at index %r' % (ns_name, ))
for name in names:
- this_suffix = suffix
+ hostname = suffix
if '@' in name:
- this_name = options['-n'] = name
- nodename, this_suffix = nodesplit(name)
- name = nodename
+ nodename = options['-n'] = host_format(name)
+ shortname, hostname = nodesplit(nodename)
+ name = shortname
else:
- nodename = '%s%s' % (prefix, name)
- this_name = options['-n'] = '%s@%s' % (nodename, this_suffix)
- expand = abbreviations({'%h': this_name,
- '%n': name,
- '%N': nodename,
- '%d': this_suffix})
+ shortname = '%s%s' % (prefix, name)
+ nodename = options['-n'] = host_format(
+ '{0}@{1}'.format(shortname, hostname),
+ )
+
+ expand = partial(
+ node_format, nodename=nodename, N=shortname, d=hostname,
+ )
argv = ([expand(cmd)] +
[format_opt(opt, expand(value))
for opt, value in items(p.optmerge(name, options))] +
[passthrough])
if append:
argv.append(expand(append))
- yield multi_args_t(this_name, argv, expand, name)
+ yield multi_args_t(nodename, argv, expand, name)
class NamespacedOptionParser(object):
@@ -591,18 +595,6 @@ def parse_ns_range(ns, ranges=False):
return ret
-def abbreviations(mapping):
-
- def expand(S):
- ret = S
- if S is not None:
- for short_opt, long_opt in items(mapping):
- ret = ret.replace(short_opt, long_opt)
- return ret
-
- return expand
-
-
def findsig(args, default=signal.SIGTERM):
for arg in reversed(args):
if len(arg) == 2 and arg[0] == '-':
diff --git a/celery/utils/__init__.py b/celery/utils/__init__.py
index 4045a85f8..7bf6c3137 100644
--- a/celery/utils/__init__.py
+++ b/celery/utils/__init__.py
@@ -352,7 +352,7 @@ def default_nodename(hostname):
def node_format(s, nodename, **extra):
name, host = nodesplit(nodename)
return host_format(
- s, host, n=name or NODENAME_DEFAULT, **extra)
+ s, host, name or NODENAME_DEFAULT, **extra)
def _fmt_process_index(prefix='', default='0'):
@@ -362,9 +362,10 @@ def _fmt_process_index(prefix='', default='0'):
_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '')
-def host_format(s, host=None, **extra):
+def host_format(s, host=None, name=None, **extra):
host = host or socket.gethostname()
- name, _, domain = host.partition('.')
+ hname, _, domain = host.partition('.')
+ name = name or hname
keys = dict({
'h': host, 'n': name, 'd': domain,
'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix,
diff --git a/docs/tutorials/daemonizing.rst b/docs/tutorials/daemonizing.rst
index 0c644584c..600dd4e8f 100644
--- a/docs/tutorials/daemonizing.rst
+++ b/docs/tutorials/daemonizing.rst
@@ -96,9 +96,9 @@ This is an example configuration for a Python project.
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
- # %N will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%N.log"
- CELERYD_PID_FILE="/var/run/celery/%N.pid"
+ # %n will be replaced with the first part of the nodename.
+ CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ CELERYD_PID_FILE="/var/run/celery/%n.pid"
# Workers should run as an unprivileged user.
# You need to create this user manually (or you can choose
@@ -153,10 +153,10 @@ Available options
directory.
* CELERYD_PID_FILE
- Full path to the PID file. Default is /var/run/celery/%N.pid
+ Full path to the PID file. Default is /var/run/celery/%n.pid
* CELERYD_LOG_FILE
- Full path to the worker log file. Default is /var/log/celery/%N.log
+ Full path to the worker log file. Default is /var/log/celery/%n.log
* CELERYD_LOG_LEVEL
Worker log level. Default is INFO.
@@ -311,9 +311,9 @@ This is an example configuration for a Python project:
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
- # %N will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%N.log"
- CELERYD_PID_FILE="/var/run/celery/%N.pid"
+ # %n will be replaced with the first part of the nodename.
+ CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ CELERYD_PID_FILE="/var/run/celery/%n.pid"
.. _generic-systemd-celeryd-django-example:
@@ -339,9 +339,9 @@ This is an example configuration for those using `django-celery`:
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
- # %N will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%N.log"
- CELERYD_PID_FILE="/var/run/celery/%N.pid"
+ # %n will be replaced with the first part of the nodename.
+ CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ CELERYD_PID_FILE="/var/run/celery/%n.pid"
To add an environment variable such as DJANGO_SETTINGS_MODULE use the
Environment in celery.service.
From be65e4ad25239348b23c0ab29d295da0aa131631 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 15:08:02 +0100
Subject: [PATCH 0046/1103] More tests for empty groups
---
celery/tests/tasks/test_canvas.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/celery/tests/tasks/test_canvas.py b/celery/tests/tasks/test_canvas.py
index 8ecbbbbc9..4c071a8a1 100644
--- a/celery/tests/tasks/test_canvas.py
+++ b/celery/tests/tasks/test_canvas.py
@@ -273,6 +273,9 @@ def test_from_dict(self):
def test_call_empty_group(self):
x = group(app=self.app)
self.assertFalse(len(x()))
+ x.delay()
+ x.apply_async()
+ x()
def test_skew(self):
g = group([self.add.s(i, i) for i in range(10)])
From 8d8f26d02bd8710832931949b78b7619cef66273 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 15:08:17 +0100
Subject: [PATCH 0047/1103] Multi doc update
---
celery/bin/multi.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 9e6dacdf7..1ceb50356 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -47,6 +47,9 @@
# specify fully qualified nodenames
$ celery multi start foo@worker.example.com bar@worker.example.com -c 3
+ # fully qualified nodenames but using the current hostname
+ $ celery multi start foo@%h bar@%h
+
# Advanced example starting 10 workers in the background:
# * Three of the workers processes the images and video queue
# * Two of the workers processes the data queue with loglevel DEBUG
From 2bcb3357cea139bac46e6ca044dd9dc33a74e66b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 15:15:46 +0100
Subject: [PATCH 0048/1103] get_backend_cls cannot be memoized as it keeps
reference to app.loader. Issue #1949
---
celery/backends/__init__.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/celery/backends/__init__.py b/celery/backends/__init__.py
index 421f7f480..fbe8a9c62 100644
--- a/celery/backends/__init__.py
+++ b/celery/backends/__init__.py
@@ -16,7 +16,6 @@
from celery._state import current_app
from celery.five import reraise
from celery.utils.imports import symbol_by_name
-from celery.utils.functional import memoize
__all__ = ['get_backend_cls', 'get_backend_by_url']
@@ -41,7 +40,6 @@
default_backend = Proxy(lambda: current_app.backend)
-@memoize(100)
def get_backend_cls(backend=None, loader=None):
"""Get backend class by name/alias"""
backend = backend or 'disabled'
From 283a75690122ed65b5ddfd11d98b0d98e99ed534 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 16:00:25 +0100
Subject: [PATCH 0049/1103] Fixes multi tests
---
celery/bin/multi.py | 1 +
celery/tests/bin/test_multi.py | 13 ++-----------
2 files changed, 3 insertions(+), 11 deletions(-)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 1ceb50356..2f2055ece 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -509,6 +509,7 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
expand = partial(
node_format, nodename=nodename, N=shortname, d=hostname,
+ h=nodename,
)
argv = ([expand(cmd)] +
[format_opt(opt, expand(value))
diff --git a/celery/tests/bin/test_multi.py b/celery/tests/bin/test_multi.py
index 0b2ecd981..76a6c1b64 100644
--- a/celery/tests/bin/test_multi.py
+++ b/celery/tests/bin/test_multi.py
@@ -8,7 +8,6 @@
main,
MultiTool,
findsig,
- abbreviations,
parse_ns_range,
format_opt,
quote,
@@ -30,14 +29,6 @@ def test_findsig(self):
self.assertEqual(findsig(['-s']), signal.SIGTERM)
self.assertEqual(findsig(['-log']), signal.SIGTERM)
- def test_abbreviations(self):
- expander = abbreviations({'%s': 'START',
- '%x': 'STOP'})
- self.assertEqual(expander('foo%s'), 'fooSTART')
- self.assertEqual(expander('foo%x'), 'fooSTOP')
- self.assertEqual(expander('foo%y'), 'foo%y')
- self.assertIsNone(expander(None))
-
def test_parse_ns_range(self):
self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3'])
self.assertEqual(parse_ns_range('1-3', False), ['1-3'])
@@ -78,6 +69,7 @@ class test_multi_args(AppCase):
@patch('socket.gethostname')
def test_parse(self, gethostname):
+ gethostname.return_value = 'example.com'
p = NamespacedOptionParser([
'-c:jerry,elaine', '5',
'--loglevel:kramer=DEBUG',
@@ -120,12 +112,11 @@ def assert_line_in(name, args):
)
expand = names[0][2]
self.assertEqual(expand('%h'), '*P*jerry@*S*')
- self.assertEqual(expand('%n'), 'jerry')
+ self.assertEqual(expand('%n'), '*P*jerry')
names2 = list(multi_args(p, cmd='COMMAND', append='',
prefix='*P*', suffix='*S*'))
self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1')
- gethostname.return_value = 'example.com'
p2 = NamespacedOptionParser(['10', '-c:1', '5'])
names3 = list(multi_args(p2, cmd='COMMAND'))
self.assertEqual(len(names3), 10)
From 63a69938dca692ae95d720a58f14ff63562b105a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 16:01:01 +0100
Subject: [PATCH 0050/1103] Fixes backend tests for Issue #1949
---
celery/tests/backends/test_backends.py | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/celery/tests/backends/test_backends.py b/celery/tests/backends/test_backends.py
index bba612770..c6a936b93 100644
--- a/celery/tests/backends/test_backends.py
+++ b/celery/tests/backends/test_backends.py
@@ -19,15 +19,6 @@ def test_get_backend_aliases(self):
expect_cls,
)
- def test_get_backend_cache(self):
- backends.get_backend_cls.clear()
- hits = backends.get_backend_cls.hits
- misses = backends.get_backend_cls.misses
- self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
- self.assertEqual(backends.get_backend_cls.misses, misses + 1)
- self.assertTrue(backends.get_backend_cls('amqp', self.app.loader))
- self.assertEqual(backends.get_backend_cls.hits, hits + 1)
-
def test_unknown_backend(self):
with self.assertRaises(ImportError):
backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader)
From 9384218ba5c2bda496fd1be220eb4ef0f1d50f58 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 2 Apr 2014 16:01:59 +0100
Subject: [PATCH 0051/1103] Fixes memory leak when app is registered in
multiprocessing's after-fork registry. Closes #1949
---
celery/app/base.py | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index 914b3204a..a134cb84c 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -58,6 +58,8 @@
Please set this variable and make it point to
a configuration module."""
+_after_fork_registered = False
+
def app_has_custom(app, attr):
return mro_lookup(app.__class__, attr, stop=(Celery, object),
@@ -70,6 +72,29 @@ def _unpickle_appattr(reverse_name, args):
return get_current_app()._rgetattr(reverse_name)(*args)
+def _global_after_fork():
+ # Previously every app would call:
+ # `register_after_fork(app, app._after_fork)`
+ # but this created a leak as `register_after_fork` stores concrete object
+ # references and once registered an object cannot be removed without
+ # touching and iterating over the private afterfork registry list.
+ #
+ # See Issue #1949
+ from celery import _state
+ from multiprocessing.util import info
+ for app in _state.apps:
+ try:
+ app._after_fork()
+ except Exception as exc:
+ info('after forker raised exception: %r' % (exc, ), exc_info=1)
+
+
+def _ensure_after_fork():
+ global _after_fork_registered
+ _after_fork_registered = True
+ register_after_fork(_global_after_fork, _global_after_fork)
+
+
class Celery(object):
#: This is deprecated, use :meth:`reduce_keys` instead
Pickler = AppPickler
@@ -590,7 +615,7 @@ def TaskSetResult(self): # XXX compat
@property
def pool(self):
if self._pool is None:
- register_after_fork(self, self._after_fork)
+ _ensure_after_fork()
limit = self.conf.BROKER_POOL_LIMIT
self._pool = self.connection().Pool(limit=limit)
return self._pool
From f9d2f21e6b585c707a82c9f10086a3ea101d4e4d Mon Sep 17 00:00:00 2001
From: Ankur Dedania
Date: Mon, 7 Apr 2014 13:16:45 -0500
Subject: [PATCH 0052/1103] Update periodic-tasks.rst
Midnight added as an occurrence
---
docs/userguide/periodic-tasks.rst | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/docs/userguide/periodic-tasks.rst b/docs/userguide/periodic-tasks.rst
index 6b829887f..92d065b6d 100644
--- a/docs/userguide/periodic-tasks.rst
+++ b/docs/userguide/periodic-tasks.rst
@@ -188,7 +188,8 @@ The syntax of these crontab expressions are very flexible. Some examples:
| ``crontab(minute=0, hour=0)`` | Execute daily at midnight. |
+-----------------------------------------+--------------------------------------------+
| ``crontab(minute=0, hour='*/3')`` | Execute every three hours: |
-| | 3am, 6am, 9am, noon, 3pm, 6pm, 9pm. |
+| | midnight, 3am, 6am, 9am, |
+| | noon, 3pm, 6pm, 9pm. |
+-----------------------------------------+--------------------------------------------+
| ``crontab(minute=0,`` | Same as previous. |
| ``hour='0,3,6,9,12,15,18,21')`` | |
From 9dbda6a5f9a6f6b7e9b304858cc82bcb8faf770b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 8 Apr 2014 13:39:27 +0100
Subject: [PATCH 0053/1103] Docs: generic celerybeat options should not write
schedule to root owned directory. Thanks to Nikos Fertakis.
---
docs/tutorials/daemonizing.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/tutorials/daemonizing.rst b/docs/tutorials/daemonizing.rst
index 600dd4e8f..6ba461ee0 100644
--- a/docs/tutorials/daemonizing.rst
+++ b/docs/tutorials/daemonizing.rst
@@ -211,7 +211,7 @@ This is an example configuration for a Python project:
CELERYBEAT_CHDIR="/opt/Myproject/"
# Extra arguments to celerybeat
- CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule"
+ CELERYBEAT_OPTS="--schedule=/var/run/celery/celerybeat-schedule"
.. _generic-initd-celerybeat-django-example:
@@ -265,7 +265,7 @@ Available options
* CELERY_CREATE_LOGDIR
Always create logfile directory. By default only enable when no custom
logfile location set.
-
+
.. _daemon-systemd-generic:
Usage systemd
@@ -279,10 +279,10 @@ Service file: celery.service
:Usage: `systemctl {start|stop|restart|status} celery.service`
:Configuration file: /etc/conf.d/celery
-To create a temporary folders for the log and pid files change user and group in
+To create a temporary folders for the log and pid files change user and group in
/usr/lib/tmpfiles.d/celery.conf.
-To configure user, group, chdir change settings User, Group and WorkingDirectory defines
-in /usr/lib/systemd/system/celery.service.
+To configure user, group, chdir change settings User, Group and WorkingDirectory defines
+in /usr/lib/systemd/system/celery.service.
.. _generic-systemd-celery-example:
From 574559dd435c1303bfdc06e78211771c241ee0f8 Mon Sep 17 00:00:00 2001
From: Matthew Duggan
Date: Wed, 9 Apr 2014 14:09:29 +0900
Subject: [PATCH 0054/1103] Add option to worker to control heartbeat interval.
Thanks to my colleague Craig Northway for the patch.
---
celery/bin/worker.py | 5 +++++
celery/tests/bin/test_worker.py | 4 +++-
celery/tests/worker/test_consumer.py | 21 ++++++++++++++++++++-
celery/worker/consumer.py | 7 +++++--
4 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index bdc564d4f..44be17e4d 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -86,6 +86,10 @@
Do not send event heartbeats.
+.. cmdoption:: --heartbeat-interval
+
+ Interval in seconds at which to send worker heartbeat
+
.. cmdoption:: --purge
Purges all waiting tasks before the daemon is started.
@@ -245,6 +249,7 @@ def get_options(self):
Option('--without-gossip', action='store_true', default=False),
Option('--without-mingle', action='store_true', default=False),
Option('--without-heartbeat', action='store_true', default=False),
+ Option('--heartbeat-interval', type='int'),
Option('-O', dest='optimization'),
Option('-D', '--detach', action='store_true'),
) + daemon_options() + tuple(self.app.user_options['worker'])
diff --git a/celery/tests/bin/test_worker.py b/celery/tests/bin/test_worker.py
index fbb7c52c2..e4ebf7157 100644
--- a/celery/tests/bin/test_worker.py
+++ b/celery/tests/bin/test_worker.py
@@ -443,8 +443,10 @@ def test_set_process_status(self):
def test_parse_options(self):
cmd = worker()
cmd.app = self.app
- opts, args = cmd.parse_options('worker', ['--concurrency=512'])
+ opts, args = cmd.parse_options('worker', ['--concurrency=512',
+ '--heartbeat-interval=10'])
self.assertEqual(opts.concurrency, 512)
+ self.assertEqual(opts.heartbeat_interval, 10)
@disable_stdouts
def test_main(self):
diff --git a/celery/tests/worker/test_consumer.py b/celery/tests/worker/test_consumer.py
index 81199b85d..b9962a49c 100644
--- a/celery/tests/worker/test_consumer.py
+++ b/celery/tests/worker/test_consumer.py
@@ -164,11 +164,30 @@ def test_start(self):
with patch('celery.worker.heartbeat.Heart') as hcls:
h = Heart(c)
self.assertTrue(h.enabled)
+ self.assertEqual(h.heartbeat_interval, None)
self.assertIsNone(c.heart)
h.start(c)
self.assertTrue(c.heart)
- hcls.assert_called_with(c.timer, c.event_dispatcher)
+ hcls.assert_called_with(c.timer, c.event_dispatcher,
+ h.heartbeat_interval)
+ c.heart.start.assert_called_with()
+
+ def test_start_heartbeat_interval(self):
+ c = Mock()
+ c.timer = Mock()
+ c.event_dispatcher = Mock()
+
+ with patch('celery.worker.heartbeat.Heart') as hcls:
+ h = Heart(c, False, 20)
+ self.assertTrue(h.enabled)
+ self.assertEqual(h.heartbeat_interval, 20)
+ self.assertIsNone(c.heart)
+
+ h.start(c)
+ self.assertTrue(c.heart)
+ hcls.assert_called_with(c.timer, c.event_dispatcher,
+ h.heartbeat_interval)
c.heart.start.assert_called_with()
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 16f0b2ff5..22d768649 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -534,12 +534,15 @@ def shutdown(self, c):
class Heart(bootsteps.StartStopStep):
requires = (Events, )
- def __init__(self, c, without_heartbeat=False, **kwargs):
+ def __init__(self, c, without_heartbeat=False, heartbeat_interval=None,
+ **kwargs):
self.enabled = not without_heartbeat
+ self.heartbeat_interval = heartbeat_interval
c.heart = None
def start(self, c):
- c.heart = heartbeat.Heart(c.timer, c.event_dispatcher)
+ c.heart = heartbeat.Heart(c.timer, c.event_dispatcher,
+ self.heartbeat_interval)
c.heart.start()
def stop(self, c):
From 547b2530866629c878d0594f959223b3229e3821 Mon Sep 17 00:00:00 2001
From: dtheodor
Date: Wed, 9 Apr 2014 11:07:40 +0200
Subject: [PATCH 0055/1103] Update task.py
---
celery/app/task.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/app/task.py b/celery/app/task.py
index 48a5b2be2..e8a4697a2 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -313,7 +313,7 @@ class Task(object):
#: :setting:`CELERY_ACKS_LATE` setting.
acks_late = None
- #: List/tuple of expected exceptions.
+ #: Tuple of expected exceptions.
#:
#: These are errors that are expected in normal operation
#: and that should not be regarded as a real error by the worker.
From 606e7ee90e178348b777ed4653af5d3d5b3dc806 Mon Sep 17 00:00:00 2001
From: dtheodor
Date: Wed, 9 Apr 2014 11:08:12 +0200
Subject: [PATCH 0056/1103] Update tasks.rst
---
docs/userguide/tasks.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/userguide/tasks.rst b/docs/userguide/tasks.rst
index b984d5480..06351d5da 100644
--- a/docs/userguide/tasks.rst
+++ b/docs/userguide/tasks.rst
@@ -472,7 +472,7 @@ General
.. attribute:: Task.throws
- Optional list of expected error classes that should not be regarded
+ Optional tuple of expected error classes that should not be regarded
as an actual error.
Errors in this list will be reported as a failure to the result backend,
From 7cbb7755c73db68963d3be0019f6498dc8056ae7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 9 Apr 2014 15:49:56 +0100
Subject: [PATCH 0057/1103] Try to fix CI tests
---
celery/tests/tasks/test_tasks.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/celery/tests/tasks/test_tasks.py b/celery/tests/tasks/test_tasks.py
index 087cd88bc..5607c255d 100644
--- a/celery/tests/tasks/test_tasks.py
+++ b/celery/tests/tasks/test_tasks.py
@@ -129,7 +129,10 @@ def test_retry_kwargs_can_be_empty(self):
try:
with self.assertRaises(Retry):
import sys
- sys.exc_clear()
+ try:
+ sys.exc_clear()
+ except AttributeError:
+ pass
self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
finally:
self.retry_task_mockapply.pop_request()
From 2f110d2f13204d413d14d366e9ff2d61427396f2 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 9 Apr 2014 16:03:49 +0100
Subject: [PATCH 0058/1103] Make celery.five importable from outside. Closes
#1968
---
celery/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/__init__.py b/celery/__init__.py
index 848907cf3..7f5c7a7c4 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -144,7 +144,7 @@ def maybe_patch_concurrency(argv=sys.argv,
__package__='celery', __file__=__file__,
__path__=__path__, __doc__=__doc__, __version__=__version__,
__author__=__author__, __contact__=__contact__,
- __homepage__=__homepage__, __docformat__=__docformat__,
+ __homepage__=__homepage__, __docformat__=__docformat__, five=five,
VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
maybe_patch_concurrency=maybe_patch_concurrency,
_find_option_with_arg=_find_option_with_arg,
From 0dcb8fe0b1ee822e8e8921263fe850557b9e19c5 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 9 Apr 2014 18:21:42 +0100
Subject: [PATCH 0059/1103] Fixes tests
---
celery/__init__.py | 4 ++--
celery/five.py | 6 +++---
celery/task/__init__.py | 4 ++--
celery/tests/app/test_loaders.py | 2 ++
4 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/celery/__init__.py b/celery/__init__.py
index 7f5c7a7c4..ddf8af385 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -127,9 +127,9 @@ def maybe_patch_concurrency(argv=sys.argv,
concurrency.get_implementation(pool)
# Lazy loading
-from .five import recreate_module
+from celery import five
-old_module, new_module = recreate_module( # pragma: no cover
+old_module, new_module = five.recreate_module( # pragma: no cover
__name__,
by_module={
'celery.app': ['Celery', 'bugreport', 'shared_task'],
diff --git a/celery/five.py b/celery/five.py
index 99ecc28e3..56c640ac8 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -295,7 +295,7 @@ def reclassmethod(method):
return classmethod(fun_of_method(method))
-class MagicModule(ModuleType):
+class LazyModule(ModuleType):
_compat_modules = ()
_all_by_module = {}
_direct = {}
@@ -321,7 +321,7 @@ def __reduce__(self):
def create_module(name, attrs, cls_attrs=None, pkg=None,
- base=MagicModule, prepare_attr=None):
+ base=LazyModule, prepare_attr=None):
fqdn = '.'.join([pkg.__name__, name]) if pkg else name
cls_attrs = {} if cls_attrs is None else cls_attrs
pkg, _, modname = name.rpartition('.')
@@ -337,7 +337,7 @@ def create_module(name, attrs, cls_attrs=None, pkg=None,
def recreate_module(name, compat_modules=(), by_module={}, direct={},
- base=MagicModule, **attrs):
+ base=LazyModule, **attrs):
old_module = sys.modules[name]
origins = get_origins(by_module)
compat_modules = COMPAT_MODULES.get(name, ())
diff --git a/celery/task/__init__.py b/celery/task/__init__.py
index f8326e887..4ab1a2feb 100644
--- a/celery/task/__init__.py
+++ b/celery/task/__init__.py
@@ -12,7 +12,7 @@
from __future__ import absolute_import
from celery._state import current_app, current_task as current
-from celery.five import MagicModule, recreate_module
+from celery.five import LazyModule, recreate_module
from celery.local import Proxy
__all__ = [
@@ -32,7 +32,7 @@
from .sets import TaskSet
-class module(MagicModule):
+class module(LazyModule):
def __call__(self, *args, **kwargs):
return self.task(*args, **kwargs)
diff --git a/celery/tests/app/test_loaders.py b/celery/tests/app/test_loaders.py
index f1b1bb037..ab69e501d 100644
--- a/celery/tests/app/test_loaders.py
+++ b/celery/tests/app/test_loaders.py
@@ -206,9 +206,11 @@ def test_import_from_cwd(self):
except ValueError:
pass
celery = sys.modules.pop('celery', None)
+ sys.modules.pop('celery.five', None)
try:
self.assertTrue(l.import_from_cwd('celery'))
sys.modules.pop('celery', None)
+ sys.modules.pop('celery.five', None)
sys.path.insert(0, os.getcwd())
self.assertTrue(l.import_from_cwd('celery'))
finally:
From fb48b1f357f7a416d1413d0056158a74191185af Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 10 Apr 2014 15:37:20 +0100
Subject: [PATCH 0060/1103] Fixes test for #1964
---
celery/tests/backends/test_mongodb.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/celery/tests/backends/test_mongodb.py b/celery/tests/backends/test_mongodb.py
index f7546d31e..a32d9ed27 100644
--- a/celery/tests/backends/test_mongodb.py
+++ b/celery/tests/backends/test_mongodb.py
@@ -196,9 +196,10 @@ def test_get_task_meta_for(self, mock_get_database):
mock_get_database.assert_called_once_with()
mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION)
self.assertEqual(
- ['status', 'task_id', 'date_done', 'traceback', 'result',
- 'children'],
- list(ret_val.keys()))
+ list(sorted(['status', 'task_id', 'date_done', 'traceback',
+ 'result', 'children'])),
+ list(sorted(ret_val.keys())),
+ )
@patch('celery.backends.mongodb.MongoBackend._get_database')
def test_get_task_meta_for_no_result(self, mock_get_database):
From ae277443cfe381a0c855a67338afe08a34cff1b5 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 10 Apr 2014 15:37:27 +0100
Subject: [PATCH 0061/1103] Wording
---
docs/userguide/optimizing.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/userguide/optimizing.rst b/docs/userguide/optimizing.rst
index 590143c63..459069fd2 100644
--- a/docs/userguide/optimizing.rst
+++ b/docs/userguide/optimizing.rst
@@ -223,5 +223,5 @@ worker option:
$ celery -A proj worker -l info -Ofair
-With this option enabled the worker will only write to workers that are
+With this option enabled the worker will only write to processes that are
available for work, disabling the prefetch behavior.
From 4ec8b25ca96f7982b27174d27cc247a7c1a6fc59 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 14 Apr 2014 21:44:40 +0100
Subject: [PATCH 0062/1103] Adds celery.version_info_t
---
celery/__init__.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/__init__.py b/celery/__init__.py
index ddf8af385..86a3e450f 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -146,6 +146,7 @@ def maybe_patch_concurrency(argv=sys.argv,
__author__=__author__, __contact__=__contact__,
__homepage__=__homepage__, __docformat__=__docformat__, five=five,
VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER,
+ version_info_t=version_info_t,
maybe_patch_concurrency=maybe_patch_concurrency,
_find_option_with_arg=_find_option_with_arg,
)
From ae04f684e9e3c0f92e221ac3b35f148515d2ff61 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 14 Apr 2014 21:46:07 +0100
Subject: [PATCH 0063/1103] Fix for QoS when using RabbitMQ 3.3 or later.
(Issue celery/kombu#339)
---
celery/worker/consumer.py | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 22d768649..3f627edce 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -592,11 +592,29 @@ def __init__(self, c, **kwargs):
def start(self, c):
c.update_strategies()
+
+ # - RabbitMQ 3.3 completely redefines how basic_qos works..
+ # This will detect if the new qos smenatics is in effect,
+ # and if so make sure the 'apply_global' flag is set on qos updates.
+ qos_global = not (
+ c.connection.transport.qos_semantics_matches_spec(
+ c.connection.connection))
+
+ # set initial prefetch count
+ c.connection.default_channel.basic_qos(
+ 0, c.initial_prefetch_count, qos_global,
+ )
+
c.task_consumer = c.app.amqp.TaskConsumer(
c.connection, on_decode_error=c.on_decode_error,
)
- c.qos = QoS(c.task_consumer.qos, c.initial_prefetch_count)
- c.qos.update() # set initial prefetch count
+
+ def set_prefetch_count(prefetch_count):
+ return c.task_consumer.qos(
+ prefetch_count=prefetch_count,
+ apply_global=qos_global,
+ )
+ c.qos = QoS(set_prefetch_count, c.initial_prefetch_count)
def stop(self, c):
if c.task_consumer:
From a6f01727abc2e594280cf3e6748cf0e11406528a Mon Sep 17 00:00:00 2001
From: Matthew Duggan
Date: Tue, 15 Apr 2014 10:05:06 +0900
Subject: [PATCH 0064/1103] Add my name to contributors
---
CONTRIBUTORS.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index b11226613..ccfe96062 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -158,3 +158,4 @@ Dan McGee, 2014/01/27
Paul Kilgo, 2014/01/28
Martin Davidsson, 2014/02/08
Chris Clark, 2014/02/20
+Matthew Duggan, 2014/04/10
From 5ed3aa492459f244d19c01f4d7b155d488c590b6 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 15 Apr 2014 14:50:10 +0100
Subject: [PATCH 0065/1103] Changelog stub for 3.1.11
---
Changelog | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/Changelog b/Changelog
index 62abdf694..92b8dd506 100644
--- a/Changelog
+++ b/Changelog
@@ -8,6 +8,13 @@ This document contains change notes for bugfix releases in the 3.1.x series
(Cipater), please see :ref:`whatsnew-3.1` for an overview of what's
new in Celery 3.1.
+.. _version-3.1.11:
+
+3.1.11
+======
+:release-date: 2014-04-XX XX:XX X.X UTC
+:release-by: XXX
+
.. _version-3.1.10:
3.1.10
From 72899335c4b19ba2d7c3520f51fe5fc54e51e526 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 15 Apr 2014 15:31:40 +0100
Subject: [PATCH 0066/1103] Batches: Do not attempt to cancel non-existing
tref. Closes #1984
---
celery/contrib/batches.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/celery/contrib/batches.py b/celery/contrib/batches.py
index 0248ebf8d..8cabc6f61 100644
--- a/celery/contrib/batches.py
+++ b/celery/contrib/batches.py
@@ -226,7 +226,8 @@ def _do_flush(self):
self.flush(requests)
if not requests:
logger.debug('Batches: Cancelling timer: Nothing in buffer.')
- self._tref.cancel() # cancel timer.
+ if self._tref:
+ self._tref.cancel() # cancel timer.
self._tref = None
def apply_buffer(self, requests, args=(), kwargs={}):
From abf1ff963b854e7114ea98c58a4562d49597fbcc Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 15 Apr 2014 17:25:48 +0100
Subject: [PATCH 0067/1103] Use the Connection.qos_semantics_matches_spec
property instead
---
celery/worker/consumer.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 3f627edce..c761d043a 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -596,9 +596,7 @@ def start(self, c):
# - RabbitMQ 3.3 completely redefines how basic_qos works..
# This will detect if the new qos smenatics is in effect,
# and if so make sure the 'apply_global' flag is set on qos updates.
- qos_global = not (
- c.connection.transport.qos_semantics_matches_spec(
- c.connection.connection))
+ qos_global = not c.connection.qos_semantics_matches_spec
# set initial prefetch count
c.connection.default_channel.basic_qos(
From daccf8c79107967c524cc3ba3d28b9e209cfe0a4 Mon Sep 17 00:00:00 2001
From: Chris Martin
Date: Tue, 15 Apr 2014 16:54:53 -0400
Subject: [PATCH 0068/1103] Fix typo in comment ("if if")
---
docs/tutorials/task-cookbook.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/tutorials/task-cookbook.rst b/docs/tutorials/task-cookbook.rst
index ad772a751..c47c09a7d 100644
--- a/docs/tutorials/task-cookbook.rst
+++ b/docs/tutorials/task-cookbook.rst
@@ -45,7 +45,7 @@ The cache key expires after some time in case something unexpected happens
feed_url_digest = md5(feed_url).hexdigest()
lock_id = '{0}-lock-{1}'.format(self.name, feed_url_hexdigest)
- # cache.add fails if if the key already exists
+ # cache.add fails if the key already exists
acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)
# memcache delete is very slow, but we have to use it to take
# advantage of using add() for atomic locking
From 8a812a8a13dadf06d60eade8b7c4d17c928a4687 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 15 Apr 2014 22:16:20 +0100
Subject: [PATCH 0069/1103] Bundle celery[librabbitmq] now depends on
librabbitmq 1.5.0
---
requirements/extras/librabbitmq.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements/extras/librabbitmq.txt b/requirements/extras/librabbitmq.txt
index e3ac1690b..8f9a2dbca 100644
--- a/requirements/extras/librabbitmq.txt
+++ b/requirements/extras/librabbitmq.txt
@@ -1 +1 @@
-librabbitmq>=1.0.2
+librabbitmq>=1.5.0
From a39745199900a8f69998af7de494d496c3878594 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 15 Apr 2014 22:16:54 +0100
Subject: [PATCH 0070/1103] Now depends on kombu 3.0.15
---
requirements/default.txt | 2 +-
setup.cfg | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/requirements/default.txt b/requirements/default.txt
index 9895c2c55..0573e01dc 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -1,3 +1,3 @@
pytz>dev
billiard>=3.3.0.14,<3.4
-kombu>=3.0.14,<4.0
+kombu>=3.0.15,<4.0
diff --git a/setup.cfg b/setup.cfg
index c8da3bd33..31cfaeaa4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -12,4 +12,4 @@ upload-dir = docs/.build/html
[bdist_rpm]
requires = pytz >= 2011b
billiard >= 3.3.0.14
- kombu >= 3.0.14
+ kombu >= 3.0.15
From 33fbec9f4d6b6b4d6a63f838e13800dd142f1131 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 14:18:50 +0100
Subject: [PATCH 0071/1103] Now depends on billiard 3.3.0.17
---
requirements/default.txt | 2 +-
setup.cfg | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/requirements/default.txt b/requirements/default.txt
index 0573e01dc..da64babcf 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -1,3 +1,3 @@
pytz>dev
-billiard>=3.3.0.14,<3.4
+billiard>=3.3.0.17,<3.4
kombu>=3.0.15,<4.0
diff --git a/setup.cfg b/setup.cfg
index 31cfaeaa4..2a032e4d1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -11,5 +11,5 @@ upload-dir = docs/.build/html
[bdist_rpm]
requires = pytz >= 2011b
- billiard >= 3.3.0.14
+ billiard >= 3.3.0.17
kombu >= 3.0.15
From 40d9c8f652787c89505f64aac415c6f125708346 Mon Sep 17 00:00:00 2001
From: Brian Bouterse
Date: Thu, 10 Apr 2014 13:43:43 -0400
Subject: [PATCH 0072/1103] Stops MongoDB Backend from using BROKER_USE_SSL.
This change effectively disables SSL as set using BROKER_USE_SSL. SSL support
should be functional through existing mechanisms, but requires doc changes and
testing. I've documented this potential feature and the necessary work with
celery/celery#1974.
---
celery/backends/mongodb.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
index c3229d51c..44c1c2252 100644
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -92,7 +92,6 @@ def __init__(self, *args, **kwargs):
self.options = dict(config, **config.pop('options', None) or {})
# Set option defaults
- self.options.setdefault('ssl', self.app.conf.BROKER_USE_SSL)
self.options.setdefault('max_pool_size', self.max_pool_size)
self.options.setdefault('auto_start_request', False)
From c24415d5496acafe86a37be69bb4bb51a9fcf336 Mon Sep 17 00:00:00 2001
From: Brian Bouterse
Date: Thu, 10 Apr 2014 14:20:26 -0400
Subject: [PATCH 0073/1103] Add Brian Bouterse to AUTHORS and CONTRIBUTORS
Conflicts:
CONTRIBUTORS.txt
---
CONTRIBUTORS.txt | 1 +
docs/AUTHORS.txt | 1 +
2 files changed, 2 insertions(+)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index ccfe96062..edf2f120a 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -159,3 +159,4 @@ Paul Kilgo, 2014/01/28
Martin Davidsson, 2014/02/08
Chris Clark, 2014/02/20
Matthew Duggan, 2014/04/10
+Brian Bouterse, 2014/04/10
diff --git a/docs/AUTHORS.txt b/docs/AUTHORS.txt
index 3d53ce911..8caea46a7 100644
--- a/docs/AUTHORS.txt
+++ b/docs/AUTHORS.txt
@@ -21,6 +21,7 @@ Ben Firshman
Brad Jasper
Branko Čibej
Brendon Crawford
+Brian Bouterse
Brian Rosner
Bryan Berg
Chase Seibert
From 840b0c683660ac37705bae90bb95179d981c0c06 Mon Sep 17 00:00:00 2001
From: Brian Bouterse
Date: Tue, 15 Apr 2014 10:12:31 -0400
Subject: [PATCH 0074/1103] Adding changelog entry
---
Changelog | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/Changelog b/Changelog
index 92b8dd506..7726a13f8 100644
--- a/Changelog
+++ b/Changelog
@@ -15,6 +15,11 @@ new in Celery 3.1.
:release-date: 2014-04-XX XX:XX X.X UTC
:release-by: XXX
+- **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB
+ results backend (Issue #1973).
+
+ Fix contributed by Brian Bouterse.
+
.. _version-3.1.10:
3.1.10
From f4658e6082945ec52f30bccabd555981a6707a3d Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 18:44:10 +0100
Subject: [PATCH 0075/1103] Fix for chord inside chord when using json
serializer. Closes #1987
---
celery/backends/base.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 437dd4c83..41ce1ef17 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -167,8 +167,10 @@ def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
if self.serializer in EXCEPTION_ABLE_CODECS:
return get_pickled_exception(exc)
- return create_exception_cls(
- from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+ elif not isinstance(exc, BaseException):
+ return create_exception_cls(
+ from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+ return exc
def prepare_value(self, result):
"""Prepare value for storage."""
From ede043f9bb4a9c2461552db7c967645ab15d5a85 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 18:53:26 +0100
Subject: [PATCH 0076/1103] Tests passing;
---
celery/tests/backends/test_mongodb.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/celery/tests/backends/test_mongodb.py b/celery/tests/backends/test_mongodb.py
index a32d9ed27..f3449f793 100644
--- a/celery/tests/backends/test_mongodb.py
+++ b/celery/tests/backends/test_mongodb.py
@@ -98,7 +98,7 @@ def test_get_connection_no_connection_host(self):
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(
- host='mongodb://localhost:27017', ssl=False, max_pool_size=10,
+ host='mongodb://localhost:27017', max_pool_size=10,
auto_start_request=False)
self.assertEqual(sentinel.connection, connection)
@@ -113,7 +113,7 @@ def test_get_connection_no_connection_mongodb_uri(self):
connection = self.backend._get_connection()
mock_Connection.assert_called_once_with(
- host=mongodb_uri, ssl=False, max_pool_size=10,
+ host=mongodb_uri, max_pool_size=10,
auto_start_request=False)
self.assertEqual(sentinel.connection, connection)
From ec011029846a23f7a0e932190a6eb4d62ac7f197 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 20:23:10 +0100
Subject: [PATCH 0077/1103] Regression: DEFAULT_DELIVERY_MODE no longer
honored, also use Exchange.delivery_mode if set. Closes #1953
---
celery/app/amqp.py | 6 ++++++
docs/userguide/optimizing.rst | 13 +++++++++++--
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 6db1d7315..a23f1d63b 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -385,6 +385,7 @@ def as_task_v1(self, task_id, name, args=None, kwargs=None,
def _create_task_sender(self):
default_retry = self.app.conf.CELERY_TASK_PUBLISH_RETRY
default_policy = self.app.conf.CELERY_TASK_PUBLISH_RETRY_POLICY
+ default_delivery_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE
default_queue = self.default_queue
queues = self.queues
send_before_publish = signals.before_task_publish.send
@@ -421,6 +422,11 @@ def publish_task(producer, name, message,
qname, queue = queue, queues[queue]
else:
qname = queue.name
+ if delivery_mode is None:
+ try:
+ delivery_mode = queue.exchange.delivery_mode
+ except AttributeError:
+ delivery_mode = default_delivery_mode
exchange = exchange or queue.exchange.name
routing_key = routing_key or queue.routing_key
if declare is None and queue and not isinstance(queue, Broadcast):
diff --git a/docs/userguide/optimizing.rst b/docs/userguide/optimizing.rst
index 459069fd2..e5ab4b312 100644
--- a/docs/userguide/optimizing.rst
+++ b/docs/userguide/optimizing.rst
@@ -98,11 +98,20 @@ to improve performance:
CELERY_QUEUES = (
Queue('celery', routing_key='celery'),
- Queue('transient', routing_key='transient',
- delivery_mode=1),
+ Queue('transient', Exchange('transient', delivery_mode=1),
+ routing_key='transient', durable=False),
)
+or by using :setting:`CELERY_ROUTES`:
+
+.. code-block:: python
+
+ CELERY_ROUTES = {
+ 'proj.tasks.add': {'queue': 'celery', 'delivery_mode': 'transient'}
+ }
+
+
The ``delivery_mode`` changes how the messages to this queue are delivered.
A value of 1 means that the message will not be written to disk, and a value
of 2 (default) means that the message can be written to disk.
From d028eed5115e03967dfa3fe273fd8c71818a2b0a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 22:30:15 +0100
Subject: [PATCH 0078/1103] Renames celery.app.builtins.shared_task ->
celery._state.connect_on_app_finalize. Closes #1937
---
celery/_state.py | 19 +++++++++++++++-
celery/app/__init__.py | 9 ++++----
celery/app/base.py | 12 +++++-----
celery/app/builtins.py | 43 +++++++++---------------------------
celery/app/task.py | 4 ++--
celery/tests/app/test_app.py | 2 +-
6 files changed, 41 insertions(+), 48 deletions(-)
diff --git a/celery/_state.py b/celery/_state.py
index e599a0fb9..a76b3f4b7 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -20,7 +20,8 @@
from celery.utils.threads import LocalStack
__all__ = ['set_default_app', 'get_current_app', 'get_current_task',
- 'get_current_worker_task', 'current_app', 'current_task']
+ 'get_current_worker_task', 'current_app', 'current_task',
+ 'connect_on_app_finalize']
#: Global default app used when no current app.
default_app = None
@@ -28,9 +29,25 @@
#: List of all app instances (weakrefs), must not be used directly.
_apps = weakref.WeakSet()
+#: global set of functions to call whenever a new app is finalized
+#: E.g. Shared tasks, and builtin tasks are created
+#: by adding callbacks here.
+_on_app_finalizers = set()
+
_task_join_will_block = False
+def connect_on_app_finalize(callback):
+ _on_app_finalizers.add(callback)
+ return callback
+
+
+def _announce_app_finalized(app):
+ callbacks = set(_on_app_finalizers)
+ for callback in callbacks:
+ callback(app)
+
+
def _set_task_join_will_block(blocks):
global _task_join_will_block
_task_join_will_block = blocks
diff --git a/celery/app/__init__.py b/celery/app/__init__.py
index 426fed6ce..952a8746d 100644
--- a/celery/app/__init__.py
+++ b/celery/app/__init__.py
@@ -13,15 +13,12 @@
from celery.local import Proxy
from celery import _state
from celery._state import (
- set_default_app,
get_current_app as current_app,
get_current_task as current_task,
- _get_active_apps,
- _task_stack,
+ connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack,
)
from celery.utils import gen_task_name
-from .builtins import shared_task as _shared_task
from .base import Celery, AppPickler
__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default',
@@ -128,7 +125,9 @@ def __inner(fun):
name = options.get('name')
# Set as shared task so that unfinalized apps,
# and future apps will load the task.
- _shared_task(lambda app: app._task_from_fun(fun, **options))
+ connect_on_app_finalize(
+ lambda app: app._task_from_fun(fun, **options)
+ )
# Force all finalized apps to take this task as well.
for app in _get_active_apps():
diff --git a/celery/app/base.py b/celery/app/base.py
index a134cb84c..22e4a480b 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -27,7 +27,8 @@
from celery import signals
from celery._state import (
_task_stack, get_current_app, _set_current_app, set_default_app,
- _register_app, get_current_worker_task,
+ _register_app, get_current_worker_task, connect_on_app_finalize,
+ _announce_app_finalized,
)
from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured
from celery.five import items, values
@@ -38,7 +39,6 @@
from celery.utils.objects import mro_lookup
from .annotations import prepare as prepare_annotations
-from .builtins import shared_task, load_shared_tasks
from .defaults import DEFAULTS, find_deprecated_settings
from .registry import TaskRegistry
from .utils import (
@@ -208,8 +208,8 @@ def task(self, *args, **opts):
 # a different task instance. This makes sure it will always use
# the task instance from the current app.
# Really need a better solution for this :(
- from . import shared_task as proxies_to_curapp
- return proxies_to_curapp(*args, _force_evaluate=True, **opts)
+ from . import shared_task
+ return shared_task(*args, _force_evaluate=True, **opts)
def inner_create_task_cls(shared=True, filter=None, **opts):
_filt = filter # stupid 2to3
@@ -218,7 +218,7 @@ def _create_task_cls(fun):
if shared:
cons = lambda app: app._task_from_fun(fun, **opts)
cons.__name__ = fun.__name__
- shared_task(cons)
+ connect_on_app_finalize(cons)
if self.accept_magic_kwargs: # compat mode
task = self._task_from_fun(fun, **opts)
if filter:
@@ -271,7 +271,7 @@ def finalize(self, auto=False):
if auto and not self.autofinalize:
raise RuntimeError('Contract breach: app not finalized')
self.finalized = True
- load_shared_tasks(self)
+ _announce_app_finalized(self)
pending = self._pending
while pending:
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index 9240537cf..e42e0b25c 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -11,39 +11,16 @@
from collections import deque
-from celery._state import get_current_worker_task
+from celery._state import get_current_worker_task, connect_on_app_finalize
from celery.utils import uuid
from celery.utils.log import get_logger
-__all__ = ['shared_task', 'load_shared_tasks']
+__all__ = []
logger = get_logger(__name__)
-#: global list of functions defining tasks that should be
-#: added to all apps.
-_shared_tasks = set()
-
-def shared_task(constructor):
- """Decorator that specifies a function that generates a built-in task.
-
- The function will then be called for every new app instance created
- (lazily, so more exactly when the task registry for that app is needed).
-
- The function must take a single ``app`` argument.
- """
- _shared_tasks.add(constructor)
- return constructor
-
-
-def load_shared_tasks(app):
- """Create built-in tasks for an app instance."""
- constructors = set(_shared_tasks)
- for constructor in constructors:
- constructor(app)
-
-
-@shared_task
+@connect_on_app_finalize
def add_backend_cleanup_task(app):
"""The backend cleanup task can be used to clean up the default result
backend.
@@ -60,7 +37,7 @@ def backend_cleanup():
return backend_cleanup
-@shared_task
+@connect_on_app_finalize
def add_unlock_chord_task(app):
"""This task is used by result backends without native chord support.
@@ -127,7 +104,7 @@ def unlock_chord(group_id, callback, interval=None, propagate=None,
return unlock_chord
-@shared_task
+@connect_on_app_finalize
def add_map_task(app):
from celery.canvas import signature
@@ -138,7 +115,7 @@ def xmap(task, it):
return xmap
-@shared_task
+@connect_on_app_finalize
def add_starmap_task(app):
from celery.canvas import signature
@@ -149,7 +126,7 @@ def xstarmap(task, it):
return xstarmap
-@shared_task
+@connect_on_app_finalize
def add_chunk_task(app):
from celery.canvas import chunks as _chunks
@@ -159,7 +136,7 @@ def chunks(task, it, n):
return chunks
-@shared_task
+@connect_on_app_finalize
def add_group_task(app):
_app = app
from celery.canvas import maybe_signature, signature
@@ -226,7 +203,7 @@ def apply(self, args=(), kwargs={}, **options):
return Group
-@shared_task
+@connect_on_app_finalize
def add_chain_task(app):
from celery.canvas import (
Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
@@ -322,7 +299,7 @@ def apply(self, args=(), kwargs={}, signature=maybe_signature,
return Chain
-@shared_task
+@connect_on_app_finalize
def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a signature, and this generates the task
diff --git a/celery/app/task.py b/celery/app/task.py
index e8a4697a2..b20974424 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -176,14 +176,14 @@ def __new__(cls, name, bases, attrs):
# Hairy stuff, here to be compatible with 2.x.
# People should not use non-abstract task classes anymore,
# use the task decorator.
- from celery.app.builtins import shared_task
+ from celery._state import connect_on_app_finalize
unique_name = '.'.join([task_module, name])
if unique_name not in cls._creation_count:
# the creation count is used as a safety
# so that the same task is not added recursively
# to the set of constructors.
cls._creation_count[unique_name] = 1
- shared_task(_CompatShared(
+ connect_on_app_finalize(_CompatShared(
unique_name,
lambda app: TaskType.__new__(cls, name, bases,
dict(attrs, _app=app)),
diff --git a/celery/tests/app/test_app.py b/celery/tests/app/test_app.py
index 36cdb67a2..5bb1ef61e 100644
--- a/celery/tests/app/test_app.py
+++ b/celery/tests/app/test_app.py
@@ -251,7 +251,7 @@ def foo(shared=False):
_state._task_stack.pop()
def test_task_not_shared(self):
- with patch('celery.app.base.shared_task') as sh:
+ with patch('celery.app.base.connect_on_app_finalize') as sh:
@self.app.task(shared=False)
def foo():
pass
From 6f3dd4b512459855036c1e51eb252f71add55a02 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 16 Apr 2014 23:40:28 +0100
Subject: [PATCH 0079/1103] Updates Changelog
---
Changelog | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 65 insertions(+), 2 deletions(-)
diff --git a/Changelog b/Changelog
index 7726a13f8..a44812cd0 100644
--- a/Changelog
+++ b/Changelog
@@ -12,14 +12,77 @@ new in Celery 3.1.
3.1.11
======
-:release-date: 2014-04-XX XX:XX X.X UTC
-:release-by: XXX
+:release-date: 2014-04-16 11:00 P.M UTC
+:release-by: Ask Solem
+
+- Now compatible with RabbitMQ 3.3.0
+
+ You need to run Celery 3.1.11 or later when using RabbitMQ 3.3,
+ and if you use the ``librabbitmq`` module you also have to upgrade
+ to librabbitmq 1.5.0:
+
+ .. code-block:: bash
+
+ $ pip install -U librabbitmq
+
+- **Requirements**:
+
+ - Now depends on :ref:`Kombu 3.0.15 `.
+
+ - Now depends on `billiard 3.3.0.17`_.
+
+ - Bundle ``celery[librabbitmq]`` now depends on :mod:`librabbitmq` 1.5.0.
+
+.. _`billiard 3.3.0.17`:
+ https://github.com/celery/billiard/blob/master/CHANGES.txt
+
+- **Tasks**: The :setting:`CELERY_DEFAULT_DELIVERY_MODE` setting was being
+ ignored (Issue #1953).
+
+- **Worker**: New :option:`--heartbeat-interval` can be used to change the
+ time (in seconds) between sending event heartbeats.
+
+ Contributed by Matthew Duggan and Craig Northway.
+
+- **App**: Fixed memory leaks occurring when creating lots of temporary
+ app instances (Issue #1949).
- **MongoDB**: SSL configuration with non-MongoDB transport breaks MongoDB
results backend (Issue #1973).
Fix contributed by Brian Bouterse.
+- **Logging**: The color formatter accidentally modified ``record.msg``
+ (Issue #1939).
+
+- **Results**: Fixed problem with task trails being stored multiple times,
+ causing ``result.collect()`` to hang (Issue #1936, Issue #1943).
+
+- **Results**: ``ResultSet`` now implements a ``.backend`` attribute for
+ compatibility with ``AsyncResult``.
+
+- **Results**: ``.forget()`` now also clears the local cache.
+
+- **Results**: Fixed problem with multiple calls to ``result._set_cache``
+ (Issue #1940).
+
+- **Results**: ``join_native`` populated result cache even if disabled.
+
+- **Results**: The YAML result serializer should now be able to handle storing
+ exceptions.
+
+- **Worker**: No longer sends task error emails for expected errors (in
+  ``@task(throws=(..., ))``).
+
+- **Canvas**: Fixed problem with exception deserialization when using
+ the JSON serializer (Issue #1987).
+
+- **Eventlet**: Fixes crash when ``celery.contrib.batches`` attempted to
+ cancel a non-existing timer (Issue #1984).
+
+- Can now import ``celery.version_info_t``, and ``celery.five`` (Issue #1968).
+
+
.. _version-3.1.10:
3.1.10
From b05df838c07f1f88fb755ff9bd1bad376a7821fe Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 17 Apr 2014 00:33:24 +0100
Subject: [PATCH 0080/1103] Must import builtins
---
celery/app/base.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/celery/app/base.py b/celery/app/base.py
index 22e4a480b..1a1300ca1 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -45,6 +45,9 @@
AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr,
)
+# Load all builtin tasks
+from . import builtins # noqa
+
__all__ = ['Celery']
_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING')
From 68ac8968a4eabac95a424297b51e3c46a4b4c7aa Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 17 Apr 2014 00:45:59 +0100
Subject: [PATCH 0081/1103] Changelog cosmetics
---
Changelog | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Changelog b/Changelog
index a44812cd0..7c7e35165 100644
--- a/Changelog
+++ b/Changelog
@@ -15,7 +15,7 @@ new in Celery 3.1.
:release-date: 2014-04-16 11:00 P.M UTC
:release-by: Ask Solem
-- Now compatible with RabbitMQ 3.3.0
+- **Now compatible with RabbitMQ 3.3.0**
You need to run Celery 3.1.11 or later when using RabbitMQ 3.3,
and if you use the ``librabbitmq`` module you also have to upgrade
From 2a0ff2641b081a7b6a43ba6e3d0533dd5f82d393 Mon Sep 17 00:00:00 2001
From: ffeast
Date: Thu, 17 Apr 2014 13:55:30 +0400
Subject: [PATCH 0082/1103] removed centos/celeryd CELERYD_CHDIR quoting
---
extra/centos/celeryd | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/extra/centos/celeryd b/extra/centos/celeryd
index 8b43b6112..879a99f63 100644
--- a/extra/centos/celeryd
+++ b/extra/centos/celeryd
@@ -95,7 +95,7 @@ if [ -n "$CELERYD_GROUP" ]; then
fi
if [ -n "$CELERYD_CHDIR" ]; then
- DAEMON_OPTS="$DAEMON_OPTS --workdir=\"$CELERYD_CHDIR\""
+ DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR"
fi
check_dev_null() {
From 4f5e8b80b77d867546dd7bede5ef71536d87dcd5 Mon Sep 17 00:00:00 2001
From: ocean1
Date: Sat, 12 Apr 2014 01:07:14 +0200
Subject: [PATCH 0083/1103] add a groupmeta_collection option to save group
results in a different collection than results and add a 'bson' fake kombu
encoder to allow pymongo to serialize natively data in mongodb
---
celery/backends/mongodb.py | 107 ++++++++++++++++++++++++++++++++-----
1 file changed, 95 insertions(+), 12 deletions(-)
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
index c3229d51c..70af35aae 100644
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -20,21 +20,48 @@
from bson.binary import Binary
except ImportError: # pragma: no cover
from pymongo.binary import Binary # noqa
+ from pymongo.errors import InvalidDocument # noqa
else: # pragma: no cover
Binary = None # noqa
+ InvalidDocument = None # noqa
from kombu.syn import detect_environment
from kombu.utils import cached_property
-
+from kombu.exceptions import EncodeError
+from kombu.serialization import register, disable_insecure_serializers
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.five import string_t
from celery.utils.timeutils import maybe_timedelta
+from celery.result import AsyncResult
from .base import BaseBackend
__all__ = ['MongoBackend']
+BINARY_CODECS = frozenset(['pickle','msgpack'])
+
+#register a fake bson serializer which will return the document as it is
+class bson_serializer():
+ @staticmethod
+ def loads(obj, *args, **kwargs):
+ if isinstance(obj,string_t):
+ try:
+ from anyjson import loads
+ return loads(obj)
+ except:
+ pass
+ return obj
+
+ @staticmethod
+ def dumps(obj, *args, **kwargs):
+ return obj
+
+register('bson', bson_serializer.loads, bson_serializer.dumps,
+ content_type='application/data',
+ content_encoding='utf-8')
+
+disable_insecure_serializers(['json','bson'])
class Bunch(object):
@@ -43,6 +70,7 @@ def __init__(self, **kw):
class MongoBackend(BaseBackend):
+
host = 'localhost'
port = 27017
user = None
@@ -64,10 +92,16 @@ def __init__(self, *args, **kwargs):
"""
self.options = {}
+
super(MongoBackend, self).__init__(*args, **kwargs)
self.expires = kwargs.get('expires') or maybe_timedelta(
self.app.conf.CELERY_TASK_RESULT_EXPIRES)
+ # little hack to get over standard kombu loads because
+ # mongo return strings which don't get decoded!
+ if self.serializer == 'bson':
+ self.decode = self.decode_bson
+
if not pymongo:
raise ImproperlyConfigured(
'You need to install the pymongo library to use the '
@@ -88,6 +122,9 @@ def __init__(self, *args, **kwargs):
self.taskmeta_collection = config.pop(
'taskmeta_collection', self.taskmeta_collection,
)
+ self.groupmeta_collection = config.pop(
+ 'groupmeta_collection', self.taskmeta_collection,
+ )
self.options = dict(config, **config.pop('options', None) or {})
@@ -101,6 +138,7 @@ def __init__(self, *args, **kwargs):
# Specifying backend as an URL
self.host = url
+
def _get_connection(self):
"""Connect to the MongoDB server."""
if self._connection is None:
@@ -132,25 +170,50 @@ def process_cleanup(self):
del(self.database)
self._connection = None
+ def encode(self, data):
+ payload = super(MongoBackend, self).encode(data)
+ #serializer which are in a unsupported format (pickle/binary)
+ if self.serializer in BINARY_CODECS:
+ payload = Binary(payload)
+
+ return payload
+
+ def decode_bson(self, data):
+ return bson_serializer.loads(data)
+
+ def encode_result(self, result, status):
+ if status in self.EXCEPTION_STATES and isinstance(result, Exception):
+ return self.prepare_exception(result)
+ else:
+ return self.prepare_value(result)
+
def _store_result(self, task_id, result, status,
traceback=None, request=None, **kwargs):
"""Store return value and status of an executed task."""
+
meta = {'_id': task_id,
'status': status,
- 'result': Binary(self.encode(result)),
+ 'result': self.encode(result),
'date_done': datetime.utcnow(),
- 'traceback': Binary(self.encode(traceback)),
- 'children': Binary(self.encode(
+ 'traceback': self.encode(traceback),
+ 'children': self.encode(
self.current_task_children(request),
- ))}
- self.collection.save(meta)
+ )}
+
+ try:
+ self.collection.save(meta)
+ except InvalidDocument as exc:
+ raise EncodeError(exc)
return result
def _get_task_meta_for(self, task_id):
"""Get task metadata for a task by id."""
- obj = self.collection.find_one({'_id': task_id})
+ # if the collection doesn't contain it, try searching in the
+ # group_collection; it could be a group result instead
+ obj = self.collection.find_one({'_id': task_id}) or \
+ self.group_collection.find_one({'_id': task_id})
if not obj:
return {'status': states.PENDING, 'result': None}
@@ -167,22 +230,29 @@ def _get_task_meta_for(self, task_id):
def _save_group(self, group_id, result):
"""Save the group result."""
+
+ task_ids = [ i.id for i in result ]
+
meta = {'_id': group_id,
- 'result': Binary(self.encode(result)),
+ 'result': self.encode(task_ids),
'date_done': datetime.utcnow()}
- self.collection.save(meta)
+ self.group_collection.save(meta)
return result
def _restore_group(self, group_id):
"""Get the result for a group by id."""
- obj = self.collection.find_one({'_id': group_id})
+ obj = self.group_collection.find_one({'_id': group_id})
if not obj:
return
+ tasks = self.decode(obj['result'])
+
+ tasks = [ AsyncResult(task) for task in tasks ]
+
meta = {
'task_id': obj['_id'],
- 'result': self.decode(obj['result']),
+ 'result': tasks,
'date_done': obj['date_done'],
}
@@ -190,7 +260,7 @@ def _restore_group(self, group_id):
def _delete_group(self, group_id):
"""Delete a group by id."""
- self.collection.remove({'_id': group_id})
+ self.group_collection.remove({'_id': group_id})
def _forget(self, task_id):
"""
@@ -209,6 +279,9 @@ def cleanup(self):
self.collection.remove(
{'date_done': {'$lt': self.app.now() - self.expires}},
)
+ self.group_collection.remove(
+ {'date_done': {'$lt': self.app.now() - self.expires}},
+ )
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
@@ -240,3 +313,13 @@ def collection(self):
# in the background. Once completed cleanup will be much faster
collection.ensure_index('date_done', background='true')
return collection
+
+ @cached_property
+ def group_collection(self):
+ """Get the metadata task collection."""
+ collection = self.database[self.groupmeta_collection]
+
+ # Ensure an index on date_done is there, if not process the index
+ # in the background. Once completed cleanup will be much faster
+ collection.ensure_index('date_done', background='true')
+ return collection
From 6373b5a8e0dee9faeabcc48b721c744816fae830 Mon Sep 17 00:00:00 2001
From: Ian Dees
Date: Thu, 17 Apr 2014 17:08:37 -0500
Subject: [PATCH 0084/1103] Correct import in security docs.
---
docs/userguide/security.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/userguide/security.rst b/docs/userguide/security.rst
index 027ad5489..ef3cd9635 100644
--- a/docs/userguide/security.rst
+++ b/docs/userguide/security.rst
@@ -168,7 +168,7 @@ with the private key and certificate files located in `/etc/ssl`.
CELERY_SECURITY_KEY = '/etc/ssl/private/worker.key'
CELERY_SECURITY_CERTIFICATE = '/etc/ssl/certs/worker.pem'
CELERY_SECURITY_CERT_STORE = '/etc/ssl/certs/*.pem'
- from celery import setup_security
+ from celery.security import setup_security
setup_security()
.. note::
From 2edea37f6aeb37b7ceb150c4ebc9cfbf85fdefa2 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 18 Apr 2014 15:43:51 +0100
Subject: [PATCH 0085/1103] Update AbortableTask docs. Closes #1993
---
celery/contrib/abortable.py | 67 +++++++++++++++++++------------------
1 file changed, 35 insertions(+), 32 deletions(-)
diff --git a/celery/contrib/abortable.py b/celery/contrib/abortable.py
index 37dc30d92..dcdc61566 100644
--- a/celery/contrib/abortable.py
+++ b/celery/contrib/abortable.py
@@ -28,49 +28,52 @@
.. code-block:: python
- from celery.contrib.abortable import AbortableTask
- from celery.utils.log import get_task_logger
-
- logger = get_logger(__name__)
-
- class MyLongRunningTask(AbortableTask):
-
- def run(self, **kwargs):
- results = []
- for x in range(100):
- # Check after every 5 loops..
- if x % 5 == 0: # alternatively, check when some timer is due
- if self.is_aborted(**kwargs):
- # Respect the aborted status and terminate
- # gracefully
- logger.warning('Task aborted.')
- return
- y = do_something_expensive(x)
- results.append(y)
- logger.info('Task finished.')
- return results
-
+ from __future__ import absolute_import
+
+ from celery.contrib.abortable import AbortableTask
+ from celery.utils.log import get_task_logger
+
+ from proj.celery import app
+
+ logger = get_logger(__name__)
+
+ @app.task(bind=True, base=AbortableTask)
+ def long_running_task(self):
+ results = []
+ for i in range(100):
+ # check after every 5 iterations...
+ # (or alternatively, check when some timer is due)
+ if not i % 5:
+ if self.is_aborted():
+ # respect aborted state, and terminate gracefully.
+ logger.warning('Task aborted')
+ return
+ value = do_something_expensive(i)
+ results.append(y)
+ logger.info('Task complete')
+ return results
In the producer:
.. code-block:: python
- from myproject.tasks import MyLongRunningTask
+ from __future__ import absolute_import
- def myview(request):
+ import time
- async_result = MyLongRunningTask.delay()
- # async_result is of type AbortableAsyncResult
+ from proj.tasks import MyLongRunningTask
- # After 10 seconds, abort the task
- time.sleep(10)
- async_result.abort()
+ def myview(request):
+ # result is of type AbortableAsyncResult
+ result = long_running_task.delay()
- ...
+ # abort the task after 10 seconds
+ time.sleep(10)
+ result.abort()
-After the `async_result.abort()` call, the task execution is not
+After the `result.abort()` call, the task execution is not
aborted immediately. In fact, it is not guaranteed to abort at all. Keep
-checking the `async_result` status, or call `async_result.wait()` to
+checking `result.state` status, or call `result.get(timeout=)` to
have it block until the task is finished.
.. note::
From 5a3014663c90e94a846a495f9619fb1ff8cdd30b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 22 Apr 2014 14:48:52 +0100
Subject: [PATCH 0086/1103] Adds flower to intersphinx
---
docs/conf.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/docs/conf.py b/docs/conf.py
index 914aee712..2cee3992a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -81,6 +81,7 @@ def linkcode_resolve(domain, info):
'djcelery': ('http://django-celery.readthedocs.org/en/latest', None),
'cyme': ('http://cyme.readthedocs.org/en/latest', None),
'amqp': ('http://amqp.readthedocs.org/en/latest', None),
+ 'flower': ('http://flower.readthedocs.org/en/latest', None),
}
# The name of the Pygments (syntax highlighting) style to use.
From 0fe113513bd5779115d5ec80fccdadf77ef9543c Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 23 Apr 2014 15:45:24 +0100
Subject: [PATCH 0087/1103] Fixes broken rst ref. Closes #1998
---
docs/userguide/calling.rst | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index 5d2150cbb..bfddf408a 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -318,7 +318,12 @@ or for each individual task, or even per message.
There's built-in support for :mod:`pickle`, `JSON`, `YAML`
and `msgpack`, and you can also add your own custom serializers by registering
-them into the Kombu serializer registry (see ref:`kombu:guide-serialization`).
+them into the Kombu serializer registry
+
+.. seealso::
+
+ :ref:`Message Serialization <kombu:guide-serialization>` in the Kombu user
+ guide.
Each option has its advantages and disadvantages.
From c9e217a6a881393076b02ac6a733fa2b1f04fbd3 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 23 Apr 2014 16:02:10 +0100
Subject: [PATCH 0088/1103] Travis: Only get IRC notification when build fixed
and failed
---
.travis.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.travis.yml b/.travis.yml
index 3690f624f..e6b5146be 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -37,5 +37,5 @@ notifications:
irc:
channels:
- "chat.freenode.net#celery"
- on_success: always
+ on_success: change
on_failure: always
From 04a12eb1ff5d83bd8df0d6acfdd40486724c138a Mon Sep 17 00:00:00 2001
From: ffeast
Date: Tue, 22 Apr 2014 13:29:58 +0400
Subject: [PATCH 0089/1103] Support of missing CELERY_BIN and CELERY_APP,
described in
http://celery.readthedocs.org/en/latest/tutorials/daemonizing.html#id11
---
extra/centos/celeryd | 3 ++-
extra/centos/celeryd.sysconfig | 9 +++++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/extra/centos/celeryd b/extra/centos/celeryd
index 879a99f63..c5e3b555c 100644
--- a/extra/centos/celeryd
+++ b/extra/centos/celeryd
@@ -71,7 +71,7 @@ if [ -z "$CELERYD_LOG_FILE" ]; then
fi
CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
-CELERYD_MULTI=${CELERYD_MULTI:-"celeryd-multi"}
+CELERYD_MULTI=${CELERYD_MULTI:-"${CELERY_BIN} multi"}
CELERYD=${CELERYD:-$DEFAULT_CELERYD}
CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES}
@@ -85,6 +85,7 @@ fi
CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE`
CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE`
+CELERYD_OPTS=${CELERYD_OPTS:-"--app=$CELERY_APP"}
# Extra start-stop-daemon options, like user/group.
if [ -n "$CELERYD_USER" ]; then
diff --git a/extra/centos/celeryd.sysconfig b/extra/centos/celeryd.sysconfig
index e1d98bd4d..c6f2d54c6 100644
--- a/extra/centos/celeryd.sysconfig
+++ b/extra/centos/celeryd.sysconfig
@@ -1,4 +1,5 @@
# In CentOS, contents should be placed in the file /etc/sysconfig/celeryd
+# Available options: http://celery.readthedocs.org/en/latest/tutorials/daemonizing.html#available-options
# Names of nodes to start (space-separated)
#CELERYD_NODES="my_application-node_1"
@@ -6,11 +7,11 @@
# Where to chdir at start. This could be the root of a virtualenv.
#CELERYD_CHDIR="/path/to/my_application"
-# How to call celeryd-multi
-#CELERYD_MULTI="$CELERYD_CHDIR/bin/celeryd-multi"
+# Absolute or relative path to the celery program
+#CELERY_BIN="/usr/local/bin/celery"
-# Extra arguments
-#CELERYD_OPTS="--app=my_application.path.to.worker --time-limit=300 --concurrency=8 --loglevel=DEBUG"
+# App instance to use (value for --app argument).
+#CELERY_APP="my_application"
# Create log/pid dirs, if they don't already exist
#CELERY_CREATE_DIRS=1
From c67fae736eb485bfabd36bb3406fa9e706bb94ea Mon Sep 17 00:00:00 2001
From: ffeast
Date: Tue, 22 Apr 2014 16:01:27 +0400
Subject: [PATCH 0090/1103] Added centos celerybeat init script + sysconfig
example + simple tests
---
extra/centos/celerybeat | 239 ++++++++++++++++++++++++++++++
extra/centos/celerybeat.sysconfig | 15 ++
extra/centos/test_celerybeat.sh | 6 +
extra/centos/test_celeryd.sh | 37 +----
extra/centos/test_service.sh | 43 ++++++
5 files changed, 304 insertions(+), 36 deletions(-)
create mode 100644 extra/centos/celerybeat
create mode 100644 extra/centos/celerybeat.sysconfig
create mode 100755 extra/centos/test_celerybeat.sh
create mode 100755 extra/centos/test_service.sh
diff --git a/extra/centos/celerybeat b/extra/centos/celerybeat
new file mode 100644
index 000000000..b51ab0762
--- /dev/null
+++ b/extra/centos/celerybeat
@@ -0,0 +1,239 @@
+#!/bin/sh
+# ============================================
+# celerybeat - Starts the Celery periodic task scheduler.
+# ============================================
+#
+# :Usage: /etc/init.d/celerybeat {start|stop|restart|status}
+# :Configuration file: /etc/sysconfig/celerybeat
+#
+# See http://docs.celeryproject.org/en/latest/tutorials/daemonizing.html
+
+### BEGIN INIT INFO
+# Provides: celerybeat
+# Required-Start: $network $local_fs $remote_fs
+# Required-Stop: $network $local_fs $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: celery periodic task scheduler
+### END INIT INFO
+#
+#
+# To implement separate init scripts, do NOT copy this script. Instead,
+# symlink it. I.e., if my new application, "little-worker" needs an init, I
+# should just use:
+#
+# ln -s /etc/init.d/celerybeat /etc/init.d/little-worker
+#
+# You can then configure this by manipulating /etc/sysconfig/little-worker.
+#
+# Setting `prog` here allows you to symlink this init script, making it easy
+# to run multiple processes on the system.
+
+# If we're invoked via SysV-style runlevel scripts we need to follow the
+# link from rcX.d before working out the script name.
+if [[ `dirname $0` == /etc/rc*.d ]]; then
+ target="$(readlink $0)"
+else
+ target=$0
+fi
+
+prog="$(basename $target)"
+
+# Source the centos service helper functions
+source /etc/init.d/functions
+# NOTE: "set -e" does not work with the above functions,
+# which use non-zero return codes as non-error return conditions
+
+# some commands work asyncronously, so we'll wait this many seconds
+SLEEP_SECONDS=5
+
+DEFAULT_PID_FILE="/var/run/celery/$prog.pid"
+DEFAULT_LOG_FILE="/var/log/celery/$prog.log"
+DEFAULT_LOG_LEVEL="INFO"
+DEFAULT_NODES="celery"
+
+CELERY_DEFAULTS=${CELERY_DEFAULTS:-"/etc/sysconfig/$prog"}
+
+test -f "$CELERY_DEFAULTS" && . "$CELERY_DEFAULTS"
+
+# Set CELERY_CREATE_DIRS to always create log/pid dirs.
+CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0}
+CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS
+CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS
+if [ -z "$CELERYBEAT_PID_FILE" ]; then
+ CELERYBEAT_PID_FILE="$DEFAULT_PID_FILE"
+ CELERY_CREATE_RUNDIR=1
+fi
+if [ -z "$CELERYBEAT_LOG_FILE" ]; then
+ CELERYBEAT_LOG_FILE="$DEFAULT_LOG_FILE"
+ CELERY_CREATE_LOGDIR=1
+fi
+
+CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
+CELERYBEAT=${CELERYBEAT:-"${CELERY_BIN} beat"}
+CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT}
+CELERYBEAT_NODES=${CELERYBEAT_NODES:-$DEFAULT_NODES}
+
+# This is used to change how Celery loads in the configs. It does not need to
+# be set to be run.
+export CELERY_LOADER
+
+if [ -n "$2" ]; then
+ CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2"
+fi
+
+CELERYBEAT_OPTS=${CELERYBEAT_OPTS:-"--app=$CELERY_APP"}
+CELERYBEAT_LOG_DIR=`dirname $CELERYBEAT_LOG_FILE`
+CELERYBEAT_PID_DIR=`dirname $CELERYBEAT_PID_FILE`
+
+# Extra start-stop-daemon options, like user/group.
+if [ -n "$CELERYBEAT_USER" ]; then
+ DAEMON_OPTS="$DAEMON_OPTS --uid=$CELERYBEAT_USER"
+fi
+if [ -n "$CELERYBEAT_GROUP" ]; then
+ DAEMON_OPTS="$DAEMON_OPTS --gid=$CELERYBEAT_GROUP"
+fi
+
+if [ -n "$CELERYBEAT_CHDIR" ]; then
+ DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYBEAT_CHDIR"
+fi
+
+check_dev_null() {
+ if [ ! -c /dev/null ]; then
+ echo "/dev/null is not a character device!"
+ exit 75 # EX_TEMPFAIL
+ fi
+}
+
+
+maybe_die() {
+ if [ $? -ne 0 ]; then
+ echo "Exiting: $* (errno $?)"
+ exit 77 # EX_NOPERM
+ fi
+}
+
+create_default_dir() {
+ if [ ! -d "$1" ]; then
+ echo "- Creating default directory: '$1'"
+ mkdir -p "$1"
+ maybe_die "Couldn't create directory $1"
+ echo "- Changing permissions of '$1' to 02755"
+ chmod 02755 "$1"
+ maybe_die "Couldn't change permissions for $1"
+ if [ -n "$CELERYBEAT_USER" ]; then
+ echo "- Changing owner of '$1' to '$CELERYBEAT_USER'"
+ chown "$CELERYBEAT_USER" "$1"
+ maybe_die "Couldn't change owner of $1"
+ fi
+ if [ -n "$CELERYBEAT_GROUP" ]; then
+ echo "- Changing group of '$1' to '$CELERYBEAT_GROUP'"
+ chgrp "$CELERYBEAT_GROUP" "$1"
+ maybe_die "Couldn't change group of $1"
+ fi
+ fi
+}
+
+
+check_paths() {
+ if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then
+ create_default_dir "$CELERYBEAT_LOG_DIR"
+ fi
+ if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then
+ create_default_dir "$CELERYBEAT_PID_DIR"
+ fi
+}
+
+create_paths() {
+ create_default_dir "$CELERYBEAT_LOG_DIR"
+ create_default_dir "$CELERYBEAT_PID_DIR"
+}
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
+
+stop() {
+ [[ ! -f "$CELERYBEAT_PID_FILE" ]] && echo "$prog is stopped" && return 0
+
+ local one_failed=
+ echo -n $"Stopping $prog: "
+
+ # killproc comes from 'functions' and brings three nice features:
+ # 1. sending TERM, sleeping, then sleeping more if needed, then sending KILL
+ # 2. handling 'success' and 'failure' output
+ # 3. removes stale pid files, if any remain
+ killproc -p "$CELERYBEAT_PID_FILE" -d "$SLEEP_SECONDS" $prog || one_failed=true
+ echo
+
+ [[ "$one_failed" ]] && return 1 || return 0
+}
+
+start() {
+ echo -n $"Starting $prog: "
+
+ # If Celery is already running, bail out
+ if [[ -f "$CELERYBEAT_PID_FILE" ]]; then
+ echo -n "$prog is already running. Use 'restart'."
+ failure
+ echo
+ return 1
+ fi
+
+ $CELERYBEAT $CELERYBEAT_OPTS $DAEMON_OPTS --detach \
+ --pidfile="$CELERYBEAT_PID_FILE" \
+ --logfile="$CELERYBEAT_LOG_FILE" \
+ --loglevel="$CELERYBEAT_LOG_LEVEL"
+
+ if [[ "$?" == "0" ]]; then
+ # Sleep a few seconds to give Celery a chance to initialize itself.
+ # This is useful to prevent scripts following this one from trying to
+ # use Celery (or its pid files) too early.
+ sleep $SLEEP_SECONDS
+ if [[ -f "$CELERYBEAT_PID_FILE" ]]; then
+ success
+ echo
+ return 0
+ else # celerybeat succeeded but no pid files found
+ failure
+ fi
+ else # celerybeat did not succeed
+ failure
+ fi
+ echo
+ return 1
+}
+
+check_status() {
+ status -p "$CELERYBEAT_PID_FILE" $"$prog" || return 1
+ return 0
+}
+
+case "$1" in
+ start)
+ check_dev_null
+ check_paths
+ start
+ ;;
+
+ stop)
+ check_dev_null
+ check_paths
+ stop
+ ;;
+
+ status)
+ check_status
+ ;;
+
+ restart)
+ check_dev_null
+ check_paths
+ stop && start
+ ;;
+
+ *)
+ echo "Usage: /etc/init.d/$prog {start|stop|restart|status}"
+ exit 3
+ ;;
+esac
+
+exit $?
diff --git a/extra/centos/celerybeat.sysconfig b/extra/centos/celerybeat.sysconfig
new file mode 100644
index 000000000..50015151e
--- /dev/null
+++ b/extra/centos/celerybeat.sysconfig
@@ -0,0 +1,15 @@
+# In CentOS, contents should be placed in the file /etc/sysconfig/celerybeat
+# Available options: http://celery.readthedocs.org/en/latest/tutorials/daemonizing.html#init-script-celerybeat
+
+# Where the Django project is.
+#CELERYBEAT_CHDIR="/path/to/my_application"
+
+# Absolute or relative path to the celery program
+#CELERY_BIN="/usr/local/bin/celery"
+
+# App instance to use (value for --app argument).
+#CELERY_APP="my_application.path.to.worker"
+
+# Beat run as an unprivileged user
+#CELERYBEAT_USER="brandings"
+#CELERYBEAT_GROUP="brandings"
diff --git a/extra/centos/test_celerybeat.sh b/extra/centos/test_celerybeat.sh
new file mode 100755
index 000000000..d60829d2d
--- /dev/null
+++ b/extra/centos/test_celerybeat.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# If you make changes to the celerybeat init script,
+# you can use this test script to verify you didn't break the universe
+
+./test_service.sh celerybeat
diff --git a/extra/centos/test_celeryd.sh b/extra/centos/test_celeryd.sh
index a331c2c83..89429e924 100755
--- a/extra/centos/test_celeryd.sh
+++ b/extra/centos/test_celeryd.sh
@@ -3,39 +3,4 @@
# If you make changes to the celeryd init script,
# you can use this test script to verify you didn't break the universe
-SERVICE="celeryd"
-SERVICE_CMD="sudo /sbin/service $SERVICE"
-
-run_test() {
- local msg="$1"
- local cmd="$2"
- local expected_retval="${3:-0}"
- local n=${#msg}
-
- echo
- echo `printf "%$((${n}+4))s" | tr " " "#"`
- echo "# $msg #"
- echo `printf "%$((${n}+4))s" | tr " " "#"`
-
- $cmd
- local retval=$?
- if [[ "$retval" == "$expected_retval" ]]; then
- echo "[PASSED]"
- else
- echo "[FAILED]"
- echo "Exit status: $retval, but expected: $expected_retval"
- exit $retval
- fi
-}
-
-run_test "stop should succeed" "$SERVICE_CMD stop" 0
-run_test "status on a stopped service should return 1" "$SERVICE_CMD status" 1
-run_test "stopping a stopped celery should not fail" "$SERVICE_CMD stop" 0
-run_test "start should succeed" "$SERVICE_CMD start" 0
-run_test "status on a running service should return 0" "$SERVICE_CMD status" 0
-run_test "starting a running service should fail" "$SERVICE_CMD start" 1
-run_test "restarting a running service should succeed" "$SERVICE_CMD restart" 0
-run_test "status on a restarted service should return 0" "$SERVICE_CMD status" 0
-run_test "stop should succeed" "$SERVICE_CMD stop" 0
-
-echo "All tests passed!"
+./test_service.sh celeryd
diff --git a/extra/centos/test_service.sh b/extra/centos/test_service.sh
new file mode 100755
index 000000000..d5a33ba38
--- /dev/null
+++ b/extra/centos/test_service.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+if [ -z "$1" ]; then
+ echo 'service name is not specified'
+ exit -1
+fi
+
+SERVICE="$1"
+SERVICE_CMD="sudo /sbin/service $SERVICE"
+
+run_test() {
+ local msg="$1"
+ local cmd="$2"
+ local expected_retval="${3:-0}"
+ local n=${#msg}
+
+ echo
+ echo `printf "%$((${n}+4))s" | tr " " "#"`
+ echo "# $msg #"
+ echo `printf "%$((${n}+4))s" | tr " " "#"`
+
+ $cmd
+ local retval=$?
+ if [[ "$retval" == "$expected_retval" ]]; then
+ echo "[PASSED]"
+ else
+ echo "[FAILED]"
+ echo "Exit status: $retval, but expected: $expected_retval"
+ exit $retval
+ fi
+}
+
+run_test "stop should succeed" "$SERVICE_CMD stop" 0
+run_test "status on a stopped service should return 1" "$SERVICE_CMD status" 1
+run_test "stopping a stopped celery should not fail" "$SERVICE_CMD stop" 0
+run_test "start should succeed" "$SERVICE_CMD start" 0
+run_test "status on a running service should return 0" "$SERVICE_CMD status" 0
+run_test "starting a running service should fail" "$SERVICE_CMD start" 1
+run_test "restarting a running service should succeed" "$SERVICE_CMD restart" 0
+run_test "status on a restarted service should return 0" "$SERVICE_CMD status" 0
+run_test "stop should succeed" "$SERVICE_CMD stop" 0
+
+echo "All tests passed!"
From 2dcc8de08ef8576d7dc924a43608410c213cd3c0 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 25 Apr 2014 14:02:07 +0100
Subject: [PATCH 0091/1103] Worker --detach must forward working_directory
option. Closes #2003
---
celery/bin/celeryd_detach.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/bin/celeryd_detach.py b/celery/bin/celeryd_detach.py
index 1db2ff041..12e1f6497 100644
--- a/celery/bin/celeryd_detach.py
+++ b/celery/bin/celeryd_detach.py
@@ -30,6 +30,7 @@
C_FAKEFORK = os.environ.get('C_FAKEFORK')
OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
+ Option('--workdir', default='/', dest='working_directory'),
Option('--fake',
default=False, action='store_true', dest='fake',
help="Don't fork (for debugging purposes)"),
From dba299a15277000d2860b510c80571721550e92c Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 25 Apr 2014 14:03:09 +0100
Subject: [PATCH 0092/1103] New signals: app.on_configure, .on_after_configure,
.on_after_finalize
---
celery/app/base.py | 30 ++++++++++++++++++++++++-----
docs/reference/celery.rst | 12 ++++++++++--
funtests/stress/stress/app.py | 10 ++--------
funtests/stress/stress/templates.py | 7 ++++++-
4 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index 1a1300ca1..b0079f7db 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -34,6 +34,7 @@
from celery.five import items, values
from celery.loaders import get_loader_cls
from celery.local import PromiseProxy, maybe_evaluate
+from celery.utils.dispatch import Signal
from celery.utils.functional import first, maybe_list
from celery.utils.imports import instantiate, symbol_by_name
from celery.utils.objects import mro_lookup
@@ -117,6 +118,15 @@ class Celery(object):
_pool = None
builtin_fixups = BUILTIN_FIXUPS
+ #: Signal sent when app is loading configuration.
+ on_configure = None
+
+ #: Signal sent after app has prepared the configuration.
+ on_after_configure = None
+
+ #: Signal sent after app has been finalized.
+ on_after_finalize = None
+
def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
set_as_current=True, accept_magic_kwargs=False,
@@ -171,6 +181,13 @@ def __init__(self, main=None, loader=None, backend=None,
if self.set_as_current:
self.set_current()
+ # Signals
+ if self.on_configure is None:
+ # used to be a method pre 3.2
+ self.on_configure = Signal()
+ self.on_after_configure = Signal()
+ self.on_after_finalize = Signal()
+
self.on_init()
_register_app(self)
@@ -283,6 +300,8 @@ def finalize(self, auto=False):
for task in values(self._tasks):
task.bind(self)
+ self.on_after_finalize.send(sender=self)
+
def add_defaults(self, fun):
if not callable(fun):
d, fun = fun, lambda: d
@@ -455,12 +474,12 @@ def _get_backend(self):
self.loader)
return backend(app=self, url=url)
- def on_configure(self):
- """Callback calld when the app loads configuration"""
- pass
-
def _get_config(self):
- self.on_configure()
+ if isinstance(self.on_configure, Signal):
+ self.on_configure.send(sender=self)
+ else:
+ # used to be a method pre 3.2
+ self.on_configure()
if self._config_source:
self.loader.config_from_object(self._config_source)
self.configured = True
@@ -474,6 +493,7 @@ def _get_config(self):
if self._preconf:
for key, value in items(self._preconf):
setattr(s, key, value)
+ self.on_after_configure.send(sender=self, source=s)
return s
def _after_fork(self, obj_):
diff --git a/docs/reference/celery.rst b/docs/reference/celery.rst
index d87cfdca1..a99c7963a 100644
--- a/docs/reference/celery.rst
+++ b/docs/reference/celery.rst
@@ -382,9 +382,17 @@ and creating Celery applications.
Finalizes the app by loading built-in tasks,
and evaluating pending task decorators
- .. method:: Celery.on_configure()
+ .. signal:: on_configure
- Optional callback for when the first time the configured is required.
+ Signal sent when app is loading configuration.
+
+ .. signal:: on_after_configure
+
+ Signal sent after app has prepared the configuration.
+
+ .. signal:: on_after_finalize
+
+ Signal sent after app has been finalized.
.. attribute:: Celery.Pickler
diff --git a/funtests/stress/stress/app.py b/funtests/stress/stress/app.py
index 0a9690cfb..077437d89 100644
--- a/funtests/stress/stress/app.py
+++ b/funtests/stress/stress/app.py
@@ -33,7 +33,7 @@ def __init__(self, *args, **kwargs):
)
)
signals.user_preload_options.connect(self.on_preload_parsed)
- self.after_configure = None
+ self.on_configure.connect(self._maybe_use_default_template)
def on_preload_parsed(self, options=None, **kwargs):
self.use_template(options['template'])
@@ -44,13 +44,7 @@ def use_template(self, name='default'):
use_template(self, name)
self.template_selected = True
- def _get_config(self):
- ret = super(App, self)._get_config()
- if self.after_configure:
- self.after_configure(ret)
- return ret
-
- def on_configure(self):
+ def _maybe_use_default_template(self, **kwargs):
if not self.template_selected:
self.use_template('default')
diff --git a/funtests/stress/stress/templates.py b/funtests/stress/stress/templates.py
index 73b8cd3bf..09e820454 100644
--- a/funtests/stress/stress/templates.py
+++ b/funtests/stress/stress/templates.py
@@ -23,7 +23,12 @@ def _register(cls):
def use_template(app, template='default'):
template = template.split(',')
- app.after_configure = partial(mixin_templates, template[1:])
+
+ # mixin the rest of the templates when the config is needed
+ @app.on_after_configure.connect
+ def load_template(sender, source, **kwargs):
+ mixin_templates(template[1:], source)
+
app.config_from_object(templates[template[0]])
From fd4701ce72f0fc17ad6e941cd25ffedfcdd03d32 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 25 Apr 2014 14:33:10 +0100
Subject: [PATCH 0093/1103] Fixes bugs with bootsteps: requires attribute not
inherited by subclasses and using module paths did not work properly. Closes
#2002
---
celery/bootsteps.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/celery/bootsteps.py b/celery/bootsteps.py
index 9c0427fe6..4471a4cb3 100644
--- a/celery/bootsteps.py
+++ b/celery/bootsteps.py
@@ -232,6 +232,8 @@ def _find_last(self):
return next((C for C in values(self.steps) if C.last), None)
def _firstpass(self, steps):
+ for step in values(steps):
+ step.requires = [symbol_by_name(dep) for dep in step.requires]
stream = deque(step.requires for step in values(steps))
while stream:
for node in stream.popleft():
@@ -283,7 +285,6 @@ def __new__(cls, name, bases, attrs):
attrs.update(
__qualname__=qname,
name=attrs.get('name') or qname,
- requires=attrs.get('requires', ()),
)
return super(StepType, cls).__new__(cls, name, bases, attrs)
From bc7eb64af22c1c8c482f5066483d52967d165ecf Mon Sep 17 00:00:00 2001
From: Dmitry Malinovsky
Date: Mon, 28 Apr 2014 11:46:05 +0700
Subject: [PATCH 0094/1103] Fixed wrong arguments
---
celery/bin/worker.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 44be17e4d..dc0407500 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -175,7 +175,7 @@ def run_from_argv(self, prog_name, argv=None, command=None):
# parse options before detaching so errors can be handled.
options, args = self.prepare_args(
*self.parse_options(prog_name, argv, command))
- self.maybe_detach([command] + sys.argv[1:])
+ self.maybe_detach([command] + argv)
return self(*args, **options)
def maybe_detach(self, argv, dopts=['-D', '--detach']):
From 607567dbabc5794ecd3ba4a4ddc05e1338f70996 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 28 Apr 2014 13:04:31 +0100
Subject: [PATCH 0095/1103] Fixes tests
---
celery/tests/bin/test_celeryd_detach.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/tests/bin/test_celeryd_detach.py b/celery/tests/bin/test_celeryd_detach.py
index 2b6e5ae8d..000d2f633 100644
--- a/celery/tests/bin/test_celeryd_detach.py
+++ b/celery/tests/bin/test_celeryd_detach.py
@@ -85,6 +85,7 @@ def test_execute_from_commandline(self, detach, exit):
detach.assert_called_with(
path=x.execv_path, uid=None, gid=None,
umask=0, fake=False, logfile='/var/log', pidfile='celeryd.pid',
+ working_directory='/',
argv=x.execv_argv + [
'-c', '1', '-lDEBUG',
'--logfile=/var/log', '--pidfile=celeryd.pid',
From 2b4a3a7ff6daaa1f09b86b525ef03a117d6d7ce6 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 30 Apr 2014 22:09:49 +0100
Subject: [PATCH 0096/1103] Stresstests: envvar C_SLEEP can now be used to add
tracebacks to sleep calls
---
funtests/stress/stress/__init__.py | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/funtests/stress/stress/__init__.py b/funtests/stress/stress/__init__.py
index 747647ffe..089130cba 100644
--- a/funtests/stress/stress/__init__.py
+++ b/funtests/stress/stress/__init__.py
@@ -1,4 +1,19 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
+import os
+import time
+
+if os.environ.get('C_SLEEP'):
+
+ _orig_sleep = time.sleep
+
+ def _sleep(n):
+ print('WARNING: Time sleep for {0}s'.format(n))
+ import traceback
+ traceback.print_stack()
+ _orig_sleep(n)
+ time.sleep = _sleep
+
+
from .app import app # noqa
From a3360b5bf02e06cb5551e420938b020aab8f9cc3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ionel=20Cristian=20M=C4=83rie=C8=99?=
Date: Fri, 2 May 2014 16:42:56 +0300
Subject: [PATCH 0097/1103] Remove python3.4 travis fixups (it's preinstalled
now)
---
.travis.yml | 7 -------
1 file changed, 7 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index e6b5146be..f0c96caa6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,13 +17,6 @@ before_install:
sudo apt-get install pypy
source ~/virtualenv/pypy/bin/activate
fi
- if [[ $TOXENV = 3.4 ]]; then
- sudo apt-get update
- sudo apt-get install python3.4-dev
- source ~/virtualenv/python3.4
- virtualenv ~/virtualenv/python3.4 --python=$(which python3.4)
- source ~/virtualenv/python3.4/bin/activate
- fi
python --version
uname -a
lsb_release -a
From cdb745343c4bde164d624163be04da9a9ae65db2 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 6 May 2014 13:36:42 +0100
Subject: [PATCH 0098/1103] Fixes stress test templates and adds SQS template
---
funtests/stress/stress/templates.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/funtests/stress/stress/templates.py b/funtests/stress/stress/templates.py
index 09e820454..931269807 100644
--- a/funtests/stress/stress/templates.py
+++ b/funtests/stress/stress/templates.py
@@ -25,7 +25,7 @@ def use_template(app, template='default'):
template = template.split(',')
# mixin the rest of the templates when the config is needed
- @app.on_after_configure.connect
+ @app.on_after_configure.connect(weak=False)
def load_template(sender, source, **kwargs):
mixin_templates(template[1:], source)
@@ -116,3 +116,11 @@ class events(default):
@template()
class execv(default):
CELERYD_FORCE_EXECV = True
+
+
+@template()
+class sqs(default):
+ BROKER_URL='sqs://'
+ BROKER_TRANSPORT_OPTIONS = {
+ 'region': os.environ.get('AWS_REGION', 'us-east-1'),
+ }
From 3e9119d4972545d9c2d4d210c07558305dfd6afe Mon Sep 17 00:00:00 2001
From: Jay Farrimond
Date: Mon, 5 May 2014 15:03:50 -0700
Subject: [PATCH 0099/1103] non-string dict keys in django-celery configs
This fix allows celery-flower to not have problems displaying
configurations for projects that still use configurations embedded
in django settings files. In this instance there are some int
dict keys that are totally unrelated to celery but that are causing
frequent error messages in the celeryd logs.
---
celery/worker/control.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/celery/worker/control.py b/celery/worker/control.py
index 8de8ac838..6016543c7 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -14,7 +14,7 @@
from kombu.utils.encoding import safe_repr
from celery.exceptions import WorkerShutdown
-from celery.five import UserDict, items
+from celery.five import UserDict, items, string_t
from celery.platforms import signals as _signals
from celery.utils import timeutils
from celery.utils.functional import maybe_list
@@ -364,7 +364,7 @@ def active_queues(state):
def _wanted_config_key(key):
- return key.isupper() and not key.startswith('__')
+ return isinstance(key, string_t) and key.isupper() and not key.startswith('__')
@Panel.register
From 0f111b1834c3c1ecfe2d40add9c334989f6ecaa4 Mon Sep 17 00:00:00 2001
From: Mher Movsisyan
Date: Tue, 6 May 2014 16:59:00 +0400
Subject: [PATCH 0100/1103] Adds stats for eventlet pool
---
celery/concurrency/eventlet.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/celery/concurrency/eventlet.py b/celery/concurrency/eventlet.py
index e5319a9b8..613b28a86 100644
--- a/celery/concurrency/eventlet.py
+++ b/celery/concurrency/eventlet.py
@@ -142,3 +142,10 @@ def on_apply(self, target, args=None, kwargs=None, callback=None,
self._quick_put(apply_target, target, args, kwargs,
callback, accept_callback,
self.getpid)
+
+ def _get_info(self):
+ return {
+ 'max-concurrency': self.limit,
+ 'free-threads': self._pool.free(),
+ 'running-threads': self._pool.running(),
+ }
From f1601c075564590112abcd344dde829b1e22b23e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 6 May 2014 16:02:56 +0100
Subject: [PATCH 0101/1103] Small doc stuff
---
celery/bin/worker.py | 4 ++--
docs/configuration.rst | 3 ++-
docs/internals/guide.rst | 27 +++++++++++++++++++++++++++
3 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index dc0407500..d5592f85f 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -71,8 +71,8 @@
.. cmdoption:: -E, --events
- Send events that can be captured by monitors like :program:`celery events`,
- `celerymon`, and others.
+ Send task-related events that can be captured by monitors like
+ :program:`celery events`, `celerymon`, and others.
.. cmdoption:: --without-gossip
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 3f787f270..864b255dd 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -1509,7 +1509,8 @@ Events
CELERY_SEND_EVENTS
~~~~~~~~~~~~~~~~~~
-Send events so the worker can be monitored by tools like `celerymon`.
+Send task-related events so that tasks can be monitored using tools like
+`flower`. Sets the default value for the workers :option:`-E` argument.
.. setting:: CELERY_SEND_TASK_SENT_EVENT
diff --git a/docs/internals/guide.rst b/docs/internals/guide.rst
index 941f7b11d..6a4be2f34 100644
--- a/docs/internals/guide.rst
+++ b/docs/internals/guide.rst
@@ -305,3 +305,30 @@ Module Overview
- celery.contrib
Additional public code that doesn't fit into any other namespace.
+
+Worker overview
+===============
+
+* `celery.bin.worker:Worker`
+
+ This is the command-line interface to the worker.
+
+ Responsibilities:
+ * Daemonization when `--detach` set,
+ * dropping privileges when using `--uid`/`--gid` arguments
+ * Installs "concurrency patches" (eventlet/gevent monkey patches).
+
+ ``app.worker_main(argv)`` calls
+ ``instantiate('celery.bin.worker:Worker')(app).execute_from_commandline(argv)``
+
+* `app.Worker` -> `celery.apps.worker:Worker`
+
+ Responsibilities:
+ * sets up logging and redirects stdouts
+ * installs signal handlers (`TERM`/`HUP`/`STOP`/`USR1` (cry)/`USR2` (rdb))
+ * prints banner and warnings (e.g. pickle warning)
+ * handles the ``--purge`` argument
+
+* `app.WorkController` -> `celery.worker.WorkController`
+
+ This is the real worker, built up around bootsteps.
From 2c37c41b051a6c214e453668883e88fccafb7bda Mon Sep 17 00:00:00 2001
From: Luke Pomfrey
Date: Fri, 2 May 2014 13:06:33 +0100
Subject: [PATCH 0102/1103] Fix handling of non-string keys in
filter_hidden_settings
---
celery/app/utils.py | 11 ++++++-----
celery/tests/app/test_utils.py | 18 +++++++++++++++++-
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/celery/app/utils.py b/celery/app/utils.py
index ba5e1bb8b..a409d8fac 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -175,11 +175,12 @@ def filter_hidden_settings(conf):
def maybe_censor(key, value, mask='*' * 8):
if isinstance(value, Mapping):
return filter_hidden_settings(value)
- if isinstance(value, string_t) and HIDDEN_SETTINGS.search(key):
- return mask
- if isinstance(key, string_t) and 'BROKER_URL' in key.upper():
- from kombu import Connection
- return Connection(value).as_uri(mask=mask)
+ if isinstance(key, string_t):
+ if HIDDEN_SETTINGS.search(key):
+ return mask
+ if 'BROKER_URL' in key.upper():
+ from kombu import Connection
+ return Connection(value).as_uri(mask=mask)
return value
return {k: maybe_censor(k, v) for k, v in items(conf)}
diff --git a/celery/tests/app/test_utils.py b/celery/tests/app/test_utils.py
index dc7e38110..b0ff108e8 100644
--- a/celery/tests/app/test_utils.py
+++ b/celery/tests/app/test_utils.py
@@ -2,7 +2,7 @@
from collections import Mapping, MutableMapping
-from celery.app.utils import Settings, bugreport
+from celery.app.utils import Settings, filter_hidden_settings, bugreport
from celery.tests.case import AppCase, Mock
@@ -20,6 +20,22 @@ def test_is_mutable_mapping(self):
self.assertTrue(issubclass(Settings, MutableMapping))
+class test_filter_hidden_settings(AppCase):
+
+ def test_handles_non_string_keys(self):
+ """filter_hidden_settings shouldn't raise an exception when handling
+ mappings with non-string keys"""
+ conf = {
+ 'STRING_KEY': 'VALUE1',
+ ('NON', 'STRING', 'KEY'): 'VALUE2',
+ 'STRING_KEY2': {
+ 'STRING_KEY3': 1,
+ ('NON', 'STRING', 'KEY', '2'): 2
+ },
+ }
+ filter_hidden_settings(conf)
+
+
class test_bugreport(AppCase):
def test_no_conn_driver_info(self):
From dc9e9755ba41eb03556a135831040c91046b1eb6 Mon Sep 17 00:00:00 2001
From: Luke Pomfrey
Date: Tue, 6 May 2014 16:16:32 +0100
Subject: [PATCH 0103/1103] Adding self to CONTRIBUTORS.txt
---
CONTRIBUTORS.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index edf2f120a..3484a6712 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -160,3 +160,4 @@ Martin Davidsson, 2014/02/08
Chris Clark, 2014/02/20
Matthew Duggan, 2014/04/10
Brian Bouterse, 2014/04/10
+Luke Pomfrey, 2014/05/06
From 61288aa2a8150b2affbaa6845a2c537d1aff3622 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 7 May 2014 11:12:21 +0100
Subject: [PATCH 0104/1103] Updates system configuration
---
extra/systemd/celery.conf | 13 ++++++++++++-
extra/systemd/celery.service | 16 +++++++++++-----
2 files changed, 23 insertions(+), 6 deletions(-)
diff --git a/extra/systemd/celery.conf b/extra/systemd/celery.conf
index d490fe793..08b90cf28 100644
--- a/extra/systemd/celery.conf
+++ b/extra/systemd/celery.conf
@@ -1,2 +1,13 @@
+# See
+# http://docs.celeryproject.org/en/latest/tutorials/daemonizing.html#available-options
+
+CELERY_APP="proj"
+CELERYD_NODES="worker"
+CELERYD_OPTS=""
+CELERY_BIN="/usr/bin/python2 -m celery"
+CELERYD_PID_FILE="/var/run/celery/%n.pid"
+CELERYD_LOG_FILE="/var/log/celery/%n.log"
+CELERYD_LOG_LEVEL="INFO"
+
d /run/celery 0755 user users -
-d /var/log/celery 0755 user users -
\ No newline at end of file
+d /var/log/celery 0755 user users -
diff --git a/extra/systemd/celery.service b/extra/systemd/celery.service
index 31f17bdb1..5729d2924 100644
--- a/extra/systemd/celery.service
+++ b/extra/systemd/celery.service
@@ -1,17 +1,23 @@
[Unit]
-Description=Celery Nodes Daemon
+Description=Celery workers
After=network.target
[Service]
Type=forking
User=user
Group=users
-#Environment=DJANGO_SETTINGS_MODULE=MyProject.settings
EnvironmentFile=-/etc/conf.d/celery
WorkingDirectory=/opt/Myproject/
-ExecStart=/usr/bin/python2 ${CELERY_BIN} $CELERYD_MULTI start $CELERYD_NODES --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel="INFO" $CELERYD_OPTS
-ExecStop=/usr/bin/python2 ${CELERY_BIN} $CELERYD_MULTI stopwait $CELERYD_NODES --pidfile=${CELERYD_PID_FILE}
-ExecReload=/usr/bin/python2 ${CELERY_BIN} $CELERYD_MULTI restart $CELERYD_NODES --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} --loglevel="INFO" $CELERYD_OPTS
+ExecStart=${CELERY_BIN} multi start $CELERYD_NODES \
+ -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} \
+ --logfile=${CELERYD_LOG_FILE} --loglevel="${CELERYD_LOG_LEVEL}" \
+ $CELERYD_OPTS
+ExecStop=${CELERY_BIN} multi stopwait $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE}
+ExecReload=${CELERY_BIN} multi restart $CELERYD_NODES \
+ -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
+ --logfile=${CELERYD_LOG_FILE} --loglevel="${CELERYD_LOG_LEVEL}" \
+ $CELERYD_OPTS
[Install]
WantedBy=multi-user.target
From 661bbfe98452e67e23a98718dd034dd811712e74 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 7 May 2014 16:42:11 +0100
Subject: [PATCH 0105/1103] Renames Task.subtask -> .signature (with alias for
compat)
---
celery/app/task.py | 22 ++++++++++++--------
celery/task/base.py | 1 +
celery/tests/tasks/test_chord.py | 10 ++++-----
docs/getting-started/next-steps.rst | 32 ++++++++++++++---------------
docs/internals/guide.rst | 2 +-
docs/internals/protocol.rst | 10 ++++-----
docs/reference/celery.rst | 2 +-
docs/userguide/calling.rst | 8 ++++----
docs/userguide/canvas.rst | 23 +++++++++------------
docs/userguide/tasks.rst | 6 +++---
examples/resultgraph/tasks.py | 2 +-
11 files changed, 60 insertions(+), 58 deletions(-)
diff --git a/celery/app/task.py b/celery/app/task.py
index b20974424..38eef7b5d 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -555,8 +555,8 @@ def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
**dict(self._get_exec_options(), **options)
)
- def subtask_from_request(self, request=None, args=None, kwargs=None,
- queue=None, **extra_options):
+ def signature_from_request(self, request=None, args=None, kwargs=None,
+ queue=None, **extra_options):
request = self.request if request is None else request
args = request.args if args is None else args
kwargs = request.kwargs if kwargs is None else kwargs
@@ -573,7 +573,10 @@ def subtask_from_request(self, request=None, args=None, kwargs=None,
options.update(
{'queue': queue} if queue else (request.delivery_info or {})
)
- return self.subtask(args, kwargs, options, type=self, **extra_options)
+ return self.signature(
+ args, kwargs, options, type=self, **extra_options
+ )
+ subtask_from_request = signature_from_request
def retry(self, args=None, kwargs=None, exc=None, throw=True,
eta=None, countdown=None, max_retries=None, **options):
@@ -647,7 +650,7 @@ def retry(self, args=None, kwargs=None, exc=None, throw=True,
countdown = self.default_retry_delay
is_eager = request.is_eager
- S = self.subtask_from_request(
+ S = self.signature_from_request(
request, args, kwargs,
countdown=countdown, eta=eta, retries=retries,
**options
@@ -748,20 +751,21 @@ def AsyncResult(self, task_id, **kwargs):
return self._get_app().AsyncResult(task_id, backend=self.backend,
task_name=self.name, **kwargs)
- def subtask(self, args=None, *starargs, **starkwargs):
+ def signature(self, args=None, *starargs, **starkwargs):
"""Return :class:`~celery.signature` object for
this task, wrapping arguments and execution options
for a single task invocation."""
starkwargs.setdefault('app', self.app)
return signature(self, args, *starargs, **starkwargs)
+ subtask = signature
def s(self, *args, **kwargs):
- """``.s(*a, **k) -> .subtask(a, k)``"""
- return self.subtask(args, kwargs)
+ """``.s(*a, **k) -> .signature(a, k)``"""
+ return self.signature(args, kwargs)
def si(self, *args, **kwargs):
- """``.si(*a, **k) -> .subtask(a, k, immutable=True)``"""
- return self.subtask(args, kwargs, immutable=True)
+ """``.si(*a, **k) -> .signature(a, k, immutable=True)``"""
+ return self.signature(args, kwargs, immutable=True)
def chunks(self, it, n):
"""Creates a :class:`~celery.canvas.chunks` task for this task."""
diff --git a/celery/task/base.py b/celery/task/base.py
index 9e12d4f8c..6feffc48d 100644
--- a/celery/task/base.py
+++ b/celery/task/base.py
@@ -24,6 +24,7 @@
#: list of methods that must be classmethods in the old API.
_COMPAT_CLASSMETHODS = (
'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request',
+ 'signature_from_request', 'signature',
'AsyncResult', 'subtask', '_get_request', '_get_exec_options',
)
diff --git a/celery/tests/tasks/test_chord.py b/celery/tests/tasks/test_chord.py
index 47e771841..531d06467 100644
--- a/celery/tests/tasks/test_chord.py
+++ b/celery/tests/tasks/test_chord.py
@@ -142,7 +142,7 @@ def callback(*args, **kwargs):
fail_current = self.app.backend.fail_from_current_stack = Mock()
try:
with patch_unlock_retry(self.app) as (unlock, retry):
- subtask, canvas.maybe_signature = (
+ signature, canvas.maybe_signature = (
canvas.maybe_signature, passthru,
)
if setup:
@@ -160,7 +160,7 @@ def callback(*args, **kwargs):
except Retry:
pass
finally:
- canvas.maybe_signature = subtask
+ canvas.maybe_signature = signature
yield callback_s, retry, fail_current
finally:
result.GroupResult = pts
@@ -211,7 +211,7 @@ def test_apply(self):
body = self.add.s(2)
result = x(body)
self.assertTrue(result.id)
- # does not modify original subtask
+ # does not modify original signature
with self.assertRaises(KeyError):
body.options['task_id']
self.assertTrue(chord._type.called)
@@ -228,6 +228,6 @@ def test_run(self):
Chord = self.app.tasks['celery.chord']
body = dict()
- Chord(group(self.add.subtask((i, i)) for i in range(5)), body)
- Chord([self.add.subtask((j, j)) for j in range(5)], body)
+ Chord(group(self.add.signature((i, i)) for i in range(5)), body)
+ Chord([self.add.signature((j, j)) for j in range(5)], body)
self.assertEqual(self.app.backend.apply_chord.call_count, 2)
diff --git a/docs/getting-started/next-steps.rst b/docs/getting-started/next-steps.rst
index d25282d16..bc10f801a 100644
--- a/docs/getting-started/next-steps.rst
+++ b/docs/getting-started/next-steps.rst
@@ -275,7 +275,7 @@ so that no message is sent::
These three methods - :meth:`delay`, :meth:`apply_async`, and applying
(``__call__``), represents the Celery calling API, which are also used for
-subtasks.
+signatures.
A more detailed overview of the Calling API can be found in the
:ref:`Calling User Guide `.
@@ -380,16 +380,16 @@ Calling tasks is described in detail in the
You just learned how to call a task using the tasks ``delay`` method,
and this is often all you need, but sometimes you may want to pass the
signature of a task invocation to another process or as an argument to another
-function, for this Celery uses something called *subtasks*.
+function, for this Celery uses something called *signatures*.
-A subtask wraps the arguments and execution options of a single task
+A signature wraps the arguments and execution options of a single task
invocation in a way such that it can be passed to functions or even serialized
and sent across the wire.
-You can create a subtask for the ``add`` task using the arguments ``(2, 2)``,
+You can create a signature for the ``add`` task using the arguments ``(2, 2)``,
and a countdown of 10 seconds like this::
- >>> add.subtask((2, 2), countdown=10)
+ >>> add.signature((2, 2), countdown=10)
tasks.add(2, 2)
There is also a shortcut using star arguments::
@@ -400,12 +400,12 @@ There is also a shortcut using star arguments::
And there's that calling API again…
-----------------------------------
-Subtask instances also supports the calling API, which means that they
+Signature instances also supports the calling API, which means that they
have the ``delay`` and ``apply_async`` methods.
-But there is a difference in that the subtask may already have
+But there is a difference in that the signature may already have
an argument signature specified. The ``add`` task takes two arguments,
-so a subtask specifying two arguments would make a complete signature::
+so a signature specifying two arguments would make a complete signature::
>>> s1 = add.s(2, 2)
>>> res = s1.delay()
@@ -418,8 +418,8 @@ But, you can also make incomplete signatures to create what we call
# incomplete partial: add(?, 2)
>>> s2 = add.s(2)
-``s2`` is now a partial subtask that needs another argument to be complete,
-and this can be resolved when calling the subtask::
+``s2`` is now a partial signature that needs another argument to be complete,
+and this can be resolved when calling the signature::
# resolves the partial: add(8, 2)
>>> res = s2.delay(8)
@@ -435,14 +435,14 @@ existing keyword arguments, but with new arguments taking precedence::
>>> s3 = add.s(2, 2, debug=True)
>>> s3.delay(debug=False) # debug is now False.
-As stated subtasks supports the calling API, which means that:
+As stated signatures supports the calling API, which means that:
-- ``subtask.apply_async(args=(), kwargs={}, **options)``
+- ``sig.apply_async(args=(), kwargs={}, **options)``
- Calls the subtask with optional partial arguments and partial
+ Calls the signature with optional partial arguments and partial
keyword arguments. Also supports partial execution options.
-- ``subtask.delay(*args, **kwargs)``
+- ``sig.delay(*args, **kwargs)``
Star argument version of ``apply_async``. Any arguments will be prepended
to the arguments in the signature, and keyword arguments is merged with any
@@ -466,7 +466,7 @@ The Primitives
- :ref:`starmap `
- :ref:`chunks `
-The primitives are subtasks themselves, so that they can be combined
+These primitives are signature objects themselves, so they can be combined
in any number of ways to compose complex workflows.
.. note::
@@ -556,7 +556,7 @@ to a chord:
90
-Since these primitives are all of the subtask type they
+Since these primitives are all of the signature type they
can be combined almost however you want, e.g::
>>> upload_document.s(file) | group(apply_filter.s() for filter in filters)
diff --git a/docs/internals/guide.rst b/docs/internals/guide.rst
index 6a4be2f34..36e053864 100644
--- a/docs/internals/guide.rst
+++ b/docs/internals/guide.rst
@@ -64,7 +64,7 @@ Naming
Sometimes it makes sense to have a class mask as a function,
and there is precedence for this in the stdlib (e.g.
:class:`~contextlib.contextmanager`). Celery examples include
- :class:`~celery.subtask`, :class:`~celery.chord`,
+ :class:`~celery.signature`, :class:`~celery.chord`,
``inspect``, :class:`~kombu.utils.functional.promise` and more..
- Factory functions and methods must be `CamelCase` (excluding verbs):
diff --git a/docs/internals/protocol.rst b/docs/internals/protocol.rst
index f80e6e8ff..6b7360b31 100644
--- a/docs/internals/protocol.rst
+++ b/docs/internals/protocol.rst
@@ -71,7 +71,7 @@ to process it.
The taskset this task is part of (if any).
* chord
- :`subtask`:
+ :`Signature`:
.. versionadded:: 2.3
@@ -88,18 +88,18 @@ to process it.
should be used.
* callbacks
- :`subtask`:
+ :`Signature`:
.. versionadded:: 3.0
- A list of subtasks to apply if the task exited successfully.
+ A list of signatures to call if the task exited successfully.
* errbacks
- :`subtask`:
+ :`Signature`:
.. versionadded:: 3.0
- A list of subtasks to apply if an error occurs while executing the task.
+ A list of signatures to call if an error occurs while executing the task.
* timelimit
:`(float, float)`:
diff --git a/docs/reference/celery.rst b/docs/reference/celery.rst
index a99c7963a..0363c446b 100644
--- a/docs/reference/celery.rst
+++ b/docs/reference/celery.rst
@@ -470,7 +470,7 @@ See :ref:`guide-canvas` for more about creating task workflows.
Signatures can also be created from tasks::
- >>> add.subtask(args=(), kwargs={}, options={})
+ >>> add.signature(args=(), kwargs={}, options={})
or the ``.s()`` shortcut::
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index bfddf408a..9701c4a1a 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -95,7 +95,7 @@ called `add`, returning the sum of two arguments:
.. topic:: There's another way…
You will learn more about this later while reading about the :ref:`Canvas
- `, but :class:`~celery.subtask`'s are objects used to pass around
+ `, but :class:`~celery.signature`'s are objects used to pass around
the signature of a task invocation, (for example to send it over the
network), and they also support the Calling API:
@@ -118,8 +118,8 @@ as a partial argument:
.. sidebar:: What is ``s``?
- The ``add.s`` call used here is called a subtask, I talk
- more about subtasks in the :ref:`canvas guide `,
+ The ``add.s`` call used here is called a signature, I talk
+ more about signatures in the :ref:`canvas guide `,
where you can also learn about :class:`~celery.chain`, which
is a simpler way to chain tasks together.
@@ -447,7 +447,7 @@ Though this particular example is much better expressed as a group:
>>> from celery import group
>>> numbers = [(2, 2), (4, 4), (8, 8), (16, 16)]
- >>> res = group(add.subtask(n) for i in numbers).apply_async()
+ >>> res = group(add.s(n) for i in numbers).apply_async()
>>> res.get()
[4, 8, 16, 32]
diff --git a/docs/userguide/canvas.rst b/docs/userguide/canvas.rst
index 0afff4dbf..f74e8e4be 100644
--- a/docs/userguide/canvas.rst
+++ b/docs/userguide/canvas.rst
@@ -26,9 +26,6 @@ A :func:`~celery.signature` wraps the arguments, keyword arguments, and executio
of a single task invocation in a way such that it can be passed to functions
or even serialized and sent across the wire.
-Signatures are often nicknamed "subtasks" because they describe a task to be called
-within a task.
-
- You can create a signature for the ``add`` task using its name like this::
>>> from celery import signature
@@ -38,9 +35,9 @@ within a task.
This task has a signature of arity 2 (two arguments): ``(2, 2)``,
and sets the countdown execution option to 10.
-- or you can create one using the task's ``subtask`` method::
+- or you can create one using the task's ``signature`` method::
- >>> add.subtask((2, 2), countdown=10)
+ >>> add.signature((2, 2), countdown=10)
tasks.add(2, 2)
- There is also a shortcut using star arguments::
@@ -55,7 +52,7 @@ within a task.
- From any signature instance you can inspect the different fields::
- >>> s = add.subtask((2, 2), {'debug': True}, countdown=10)
+ >>> s = add.signature((2, 2), {'debug': True}, countdown=10)
>>> s.args
(2, 2)
>>> s.kwargs
@@ -82,10 +79,10 @@ within a task.
``apply_async`` takes the same arguments as the :meth:`Task.apply_async <@Task.apply_async>` method::
>>> add.apply_async(args, kwargs, **options)
- >>> add.subtask(args, kwargs, **options).apply_async()
+ >>> add.signature(args, kwargs, **options).apply_async()
>>> add.apply_async((2, 2), countdown=1)
- >>> add.subtask((2, 2), countdown=1).apply_async()
+ >>> add.signature((2, 2), countdown=1).apply_async()
- You can't define options with :meth:`~@Task.s`, but a chaining
``set`` call takes care of that::
@@ -125,7 +122,7 @@ creates partials:
- Any options added will be merged with the options in the signature,
with the new options taking precedence::
- >>> s = add.subtask((2, 2), countdown=10)
+ >>> s = add.signature((2, 2), countdown=10)
>>> s.apply_async(countdown=1) # countdown is now 1
You can also clone signatures to create derivates:
@@ -147,7 +144,7 @@ Sometimes you want to specify a callback that does not take
additional arguments, and in that case you can set the signature
to be immutable::
- >>> add.apply_async((2, 2), link=reset_buffers.subtask(immutable=True))
+ >>> add.apply_async((2, 2), link=reset_buffers.signature(immutable=True))
The ``.si()`` shortcut can also be used to create immutable signatures::
@@ -289,7 +286,7 @@ Here's some examples:
In that case you can mark the signature as immutable, so that the arguments
cannot be changed::
- >>> add.subtask((2, 2), immutable=True)
+ >>> add.signature((2, 2), immutable=True)
There's also an ``.si`` shortcut for this::
@@ -419,7 +416,7 @@ The linked task will be applied with the result of its parent
task as the first argument, which in the above case will result
in ``mul(4, 16)`` since the result is 4.
-The results will keep track of what subtasks a task applies,
+The results will keep track of any subtasks called by the original task,
and this can be accessed from the result instance::
>>> res.children
@@ -456,7 +453,7 @@ You can also add *error callbacks* using the ``link_error`` argument::
>>> add.apply_async((2, 2), link_error=log_error.s())
- >>> add.subtask((2, 2), link_error=log_error.s())
+ >>> add.signature((2, 2), link_error=log_error.s())
Since exceptions can only be serialized when pickle is used
the error callbacks take the id of the parent task as argument instead:
diff --git a/docs/userguide/tasks.rst b/docs/userguide/tasks.rst
index 06351d5da..8e7cb0739 100644
--- a/docs/userguide/tasks.rst
+++ b/docs/userguide/tasks.rst
@@ -266,9 +266,9 @@ The request defines the following attributes:
:called_directly: This flag is set to true if the task was not
executed by the worker.
-:callbacks: A list of subtasks to be called if this task returns successfully.
+:callbacks: A list of signatures to be called if this task returns successfully.
-:errback: A list of subtasks to be called if this task fails.
+:errback: A list of signatures to be called if this task fails.
:utc: Set to true the caller has utc enabled (:setting:`CELERY_ENABLE_UTC`).
@@ -1297,7 +1297,7 @@ Make your design asynchronous instead, for example by using *callbacks*.
Here I instead created a chain of tasks by linking together
-different :func:`~celery.subtask`'s.
+different :func:`~celery.signature`'s.
You can read about chains and other powerful constructs
at :ref:`designing-workflows`.
diff --git a/examples/resultgraph/tasks.py b/examples/resultgraph/tasks.py
index bb14d2798..3c6dd81b0 100644
--- a/examples/resultgraph/tasks.py
+++ b/examples/resultgraph/tasks.py
@@ -16,7 +16,7 @@
# when the second task is ready.)
#
# >>> unlock_graph.apply_async((A.apply_async(),
-# ... A_callback.subtask()), countdown=1)
+# ... A_callback.s()), countdown=1)
from celery import chord, group, task, signature, uuid
From d79dcd8e82c5e41f39abd07ffed81ca58052bcd2 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 8 May 2014 16:48:43 +0100
Subject: [PATCH 0106/1103] Canvas refactor, parts of root_id and parent_id and
group no longer evaluates generators
---
celery/app/amqp.py | 10 +-
celery/app/base.py | 3 +-
celery/app/builtins.py | 187 ++-------------------
celery/app/task.py | 2 +
celery/canvas.py | 264 ++++++++++++++++++++++++------
celery/tests/app/test_builtins.py | 22 ++-
celery/tests/tasks/test_chord.py | 8 +-
docs/internals/protov2.rst | 3 +
8 files changed, 251 insertions(+), 248 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index a23f1d63b..e3f62b731 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -269,7 +269,8 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
expires=None, retries=0, chord=None,
callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
- create_sent_event=False, now=None, timezone=None):
+ create_sent_event=False, now=None, timezone=None,
+ root_id=None, parent_id=None):
args = args or ()
kwargs = kwargs or {}
utc = self.utc
@@ -305,6 +306,8 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'chord': chord,
'retries': retries,
'timelimit': (time_limit, soft_time_limit),
+ 'root_id': root_id,
+ 'parent_id': parent_id,
},
properties={
'correlation_id': task_id,
@@ -313,6 +316,8 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
body=(args, kwargs),
sent_event={
'uuid': task_id,
+ 'root': root_id,
+ 'parent': parent_id,
'name': name,
'args': safe_repr(args),
'kwargs': safe_repr(kwargs),
@@ -327,7 +332,8 @@ def as_task_v1(self, task_id, name, args=None, kwargs=None,
expires=None, retries=0,
chord=None, callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
- create_sent_event=False, now=None, timezone=None):
+ create_sent_event=False, now=None, timezone=None,
+ root_id=None, parent_id=None):
args = args or ()
kwargs = kwargs or {}
utc = self.utc
diff --git a/celery/app/base.py b/celery/app/base.py
index b0079f7db..02590025a 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -351,7 +351,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
publisher=None, link=None, link_error=None,
add_to_parent=True, group_id=None, retries=0, chord=None,
reply_to=None, time_limit=None, soft_time_limit=None,
- **options):
+ root_id=None, parent_id=None, **options):
amqp = self.amqp
task_id = task_id or uuid()
producer = producer or publisher # XXX compat
@@ -369,6 +369,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
maybe_list(link), maybe_list(link_error),
reply_to or self.oid, time_limit, soft_time_limit,
self.conf.CELERY_SEND_TASK_SENT_EVENT,
+ root_id, parent_id,
)
if connection:
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index e42e0b25c..f08bf5054 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -9,10 +9,7 @@
"""
from __future__ import absolute_import
-from collections import deque
-
from celery._state import get_current_worker_task, connect_on_app_finalize
-from celery.utils import uuid
from celery.utils.log import get_logger
__all__ = []
@@ -44,7 +41,7 @@ def add_unlock_chord_task(app):
It joins chords by creating a task chain polling the header for completion.
"""
- from celery.canvas import signature
+ from celery.canvas import maybe_signature
from celery.exceptions import ChordError
from celery.result import allow_join_result, result_from_tuple
@@ -66,6 +63,8 @@ def unlock_chord(group_id, callback, interval=None, propagate=None,
interval = unlock_chord.default_retry_delay
# check if the task group is ready, and if so apply the callback.
+ callback = maybe_signature(callback, app)
+ root_id = callback.options.get('root_id')
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
@@ -73,7 +72,7 @@ def unlock_chord(group_id, callback, interval=None, propagate=None,
j = deps.join_native if deps.supports_native_join else deps.join
if deps.ready():
- callback = signature(callback, app=app)
+ callback = maybe_signature(callback, app=app)
try:
with allow_join_result():
ret = j(timeout=3.0, propagate=propagate)
@@ -139,7 +138,7 @@ def chunks(task, it, n):
@connect_on_app_finalize
def add_group_task(app):
_app = app
- from celery.canvas import maybe_signature, signature
+ from celery.canvas import maybe_signature
from celery.result import result_from_tuple
class Group(app.Task):
@@ -153,13 +152,8 @@ def run(self, tasks, result, group_id, partial_args,
app = self.app
result = result_from_tuple(result, app)
# any partial args are added to all tasks in the group
- taskit = (signature(task, app=app).clone(partial_args)
+ taskit = (maybe_signature(task, app=app).clone(partial_args)
for i, task in enumerate(tasks))
- if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER:
- return app.GroupResult(
- result.id,
- [stask.apply(group_id=group_id) for stask in taskit],
- )
with app.producer_or_acquire() as pub:
[stask.apply_async(group_id=group_id, producer=pub,
add_to_parent=False) for stask in taskit]
@@ -167,48 +161,11 @@ def run(self, tasks, result, group_id, partial_args,
if add_to_parent and parent:
parent.add_trail(result)
return result
-
- def prepare(self, options, tasks, args, **kwargs):
- options['group_id'] = group_id = (
- options.setdefault('task_id', uuid()))
-
- def prepare_member(task):
- task = maybe_signature(task, app=self.app)
- task.options['group_id'] = group_id
- return task, task.freeze()
-
- try:
- tasks, res = list(zip(
- *[prepare_member(task) for task in tasks]
- ))
- except ValueError: # tasks empty
- tasks, res = [], []
- return (tasks, self.app.GroupResult(group_id, res), group_id, args)
-
- def apply_async(self, partial_args=(), kwargs={}, **options):
- if self.app.conf.CELERY_ALWAYS_EAGER:
- return self.apply(partial_args, kwargs, **options)
- tasks, result, gid, args = self.prepare(
- options, args=partial_args, **kwargs
- )
- super(Group, self).apply_async((
- list(tasks), result.as_tuple(), gid, args), **options
- )
- return result
-
- def apply(self, args=(), kwargs={}, **options):
- return super(Group, self).apply(
- self.prepare(options, args=args, **kwargs),
- **options).get()
return Group
@connect_on_app_finalize
def add_chain_task(app):
- from celery.canvas import (
- Signature, chain, chord, group, maybe_signature, maybe_unroll_group,
- )
-
_app = app
class Chain(app.Task):
@@ -217,85 +174,6 @@ class Chain(app.Task):
accept_magic_kwargs = False
_decorated = True
- def prepare_steps(self, args, tasks):
- app = self.app
- steps = deque(tasks)
- next_step = prev_task = prev_res = None
- tasks, results = [], []
- i = 0
- while steps:
- # First task get partial args from chain.
- task = maybe_signature(steps.popleft(), app=app)
- task = task.clone() if i else task.clone(args)
- res = task.freeze()
- i += 1
-
- if isinstance(task, group):
- task = maybe_unroll_group(task)
- if isinstance(task, chain):
- # splice the chain
- steps.extendleft(reversed(task.tasks))
- continue
-
- elif isinstance(task, group) and steps and \
- not isinstance(steps[0], group):
- # automatically upgrade group(..) | s to chord(group, s)
- try:
- next_step = steps.popleft()
- # for chords we freeze by pretending it's a normal
- # task instead of a group.
- res = Signature.freeze(next_step)
- task = chord(task, body=next_step, task_id=res.task_id)
- except IndexError:
- pass # no callback, so keep as group
- if prev_task:
- # link previous task to this task.
- prev_task.link(task)
- # set the results parent attribute.
- if not res.parent:
- res.parent = prev_res
-
- if not isinstance(prev_task, chord):
- results.append(res)
- tasks.append(task)
- prev_task, prev_res = task, res
-
- return tasks, results
-
- def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
- task_id=None, link=None, link_error=None, **options):
- if self.app.conf.CELERY_ALWAYS_EAGER:
- return self.apply(args, kwargs, **options)
- options.pop('publisher', None)
- tasks, results = self.prepare_steps(args, kwargs['tasks'])
- result = results[-1]
- if group_id:
- tasks[-1].set(group_id=group_id)
- if chord:
- tasks[-1].set(chord=chord)
- if task_id:
- tasks[-1].set(task_id=task_id)
- result = tasks[-1].type.AsyncResult(task_id)
- # make sure we can do a link() and link_error() on a chain object.
- if link:
- tasks[-1].set(link=link)
- # and if any task in the chain fails, call the errbacks
- if link_error:
- for task in tasks:
- task.set(link_error=link_error)
- tasks[0].apply_async(**options)
- return result
-
- def apply(self, args=(), kwargs={}, signature=maybe_signature,
- **options):
- app = self.app
- last, fargs = None, args # fargs passed to first task only
- for task in kwargs['tasks']:
- res = signature(task, app=app).clone(fargs).apply(
- last and (last.get(), ),
- )
- res.parent, last, fargs = last, res, None
- return last
return Chain
@@ -304,10 +182,9 @@ def add_chord_task(app):
"""Every chord is executed in a dedicated task, so that the chord
can be used as a signature, and this generates the task
responsible for that."""
- from celery import group
+ from celery import group, chord as _chord
from celery.canvas import maybe_signature
_app = app
- default_propagate = app.conf.CELERY_CHORD_PROPAGATES
class Chord(app.Task):
app = _app
@@ -320,53 +197,13 @@ def run(self, header, body, partial_args=(), interval=None,
countdown=1, max_retries=None, propagate=None,
eager=False, **kwargs):
app = self.app
- propagate = default_propagate if propagate is None else propagate
- group_id = uuid()
-
# - convert back to group if serialized
tasks = header.tasks if isinstance(header, group) else header
header = group([
- maybe_signature(s, app=app).clone() for s in tasks
+ maybe_signature(s, app=app) for s in tasks
], app=self.app)
- # - eager applies the group inline
- if eager:
- return header.apply(args=partial_args, task_id=group_id)
-
- body.setdefault('chord_size', len(header.tasks))
- results = header.freeze(group_id=group_id, chord=body).results
-
- return self.backend.apply_chord(
- header, partial_args, group_id,
- body, interval=interval, countdown=countdown,
- max_retries=max_retries, propagate=propagate, result=results,
- )
-
- def apply_async(self, args=(), kwargs={}, task_id=None,
- group_id=None, chord=None, **options):
- app = self.app
- if app.conf.CELERY_ALWAYS_EAGER:
- return self.apply(args, kwargs, **options)
- header = kwargs.pop('header')
- body = kwargs.pop('body')
- header, body = (maybe_signature(header, app=app),
- maybe_signature(body, app=app))
- # forward certain options to body
- if chord is not None:
- body.options['chord'] = chord
- if group_id is not None:
- body.options['group_id'] = group_id
- [body.link(s) for s in options.pop('link', [])]
- [body.link_error(s) for s in options.pop('link_error', [])]
- body_result = body.freeze(task_id)
- parent = super(Chord, self).apply_async((header, body, args),
- kwargs, **options)
- body_result.parent = parent
- return body_result
-
- def apply(self, args=(), kwargs={}, propagate=True, **options):
- body = kwargs['body']
- res = super(Chord, self).apply(args, dict(kwargs, eager=True),
- **options)
- return maybe_signature(body, app=self.app).apply(
- args=(res.get(propagate=propagate).get(), ))
+ body = maybe_signature(body, app=app)
+ ch = _chord(header, body)
+ return ch.run(header, body, partial_args, app, interval,
+ countdown, max_retries, propagate, **kwargs)
return Chord
diff --git a/celery/app/task.py b/celery/app/task.py
index 38eef7b5d..4687f29d2 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -93,6 +93,8 @@ class Context(object):
headers = None
delivery_info = None
reply_to = None
+ root_id = None
+ parent_id = None
correlation_id = None
taskset = None # compat alias to group
group = None
diff --git a/celery/canvas.py b/celery/canvas.py
index 5efb75b09..16924eeba 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -12,6 +12,7 @@
"""
from __future__ import absolute_import
+from collections import deque
from copy import deepcopy
from functools import partial as _partial, reduce
from operator import itemgetter
@@ -19,7 +20,7 @@
from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
-from celery._state import current_app
+from celery._state import current_app, get_current_worker_task
from celery.utils.functional import (
maybe_list, is_list, regen,
chunks as _chunks,
@@ -194,12 +195,13 @@ def clone(self, args=(), kwargs={}, **opts):
return s
partial = clone
- def freeze(self, _id=None, group_id=None, chord=None):
+ def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
opts = self.options
try:
tid = opts['task_id']
except KeyError:
tid = opts['task_id'] = _id or uuid()
+ root_id = opts.setdefault('root_id', root_id)
if 'reply_to' not in opts:
opts['reply_to'] = self.app.oid
if group_id:
@@ -348,6 +350,99 @@ def __call__(self, *args, **kwargs):
if self.tasks:
return self.apply_async(args, kwargs)
+ def apply_async(self, args=(), kwargs={}, group_id=None, chord=None,
+ task_id=None, link=None, link_error=None,
+ publisher=None, root_id=None, **options):
+ app = self.app
+ if app.conf.CELERY_ALWAYS_EAGER:
+ return self.apply(args, kwargs, **options)
+ tasks, results = self.prepare_steps(
+ args, self.tasks, root_id, link_error,
+ )
+ if not results:
+ return
+ result = results[-1]
+ last_task = tasks[-1]
+ if group_id:
+ last_task.set(group_id=group_id)
+ if chord:
+ last_task.set(chord=chord)
+ if task_id:
+ last_task.set(task_id=task_id)
+ result = last_task.type.AsyncResult(task_id)
+ # make sure we can do a link() and link_error() on a chain object.
+ if link:
+ tasks[-1].set(link=link)
+ tasks[0].apply_async(**options)
+ return result
+
+ def prepare_steps(self, args, tasks,
+ root_id=None, link_error=None, app=None):
+ app = app or self.app
+ steps = deque(tasks)
+ next_step = prev_task = prev_res = None
+ tasks, results = [], []
+ i = 0
+ while steps:
+ task = steps.popleft()
+ if not i: # first task
+ # first task gets partial args from chain
+ task = task.clone(args)
+ res = task.freeze(root_id=root_id)
+ root_id = res.id if root_id is None else root_id
+ else:
+ task = task.clone()
+ res = task.freeze(root_id=root_id)
+ i += 1
+
+ if isinstance(task, group):
+ task = maybe_unroll_group(task)
+
+ if isinstance(task, chain):
+ # splice the chain
+ steps.extendleft(reversed(task.tasks))
+ continue
+ elif isinstance(task, group) and steps and \
+ not isinstance(steps[0], group):
+ # automatically upgrade group(...) | s to chord(group, s)
+ try:
+ next_step = steps.popleft()
+ # for chords we freeze by pretending it's a normal
+ # signature instead of a group.
+ res = Signature.freeze(next_step)
+ task = chord(
+ task, body=next_step,
+ task_id=res.task_id, root_id=root_id,
+ )
+ except IndexError:
+ pass # no callback, so keep as group.
+
+ if prev_task:
+ # link previous task to this task.
+ prev_task.link(task)
+ # set AsyncResult.parent
+ if not res.parent:
+ res.parent = prev_res
+
+ if link_error:
+ task.set(link_error=link_error)
+
+ if not isinstance(prev_task, chord):
+ results.append(res)
+ tasks.append(task)
+ prev_task, prev_res = task, res
+
+ return tasks, results
+
+ def apply(self, args=(), kwargs={}, **options):
+ last, fargs = None, args
+ for task in self.tasks:
+ res = task.clone(fargs).apply(
+ last and (last.get(), ), **options
+ )
+ res.parent, last, fargs = last, res, None
+ return last
+
@classmethod
def from_dict(self, d, app=None):
tasks = d['kwargs']['tasks']
@@ -357,11 +452,14 @@ def from_dict(self, d, app=None):
return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
@property
- def type(self):
- try:
- return self._type or self.tasks[0].type.app.tasks['celery.chain']
- except KeyError:
- return self.app.tasks['celery.chain']
+ def app(self):
+ app = self._app
+ if app is None:
+ try:
+ app = self.tasks[0]._app
+ except (KeyError, IndexError):
+ pass
+ return app or current_app
def __repr__(self):
return ' | '.join(repr(t) for t in self.tasks)
@@ -452,11 +550,6 @@ def _maybe_group(tasks):
return tasks
-def _maybe_clone(tasks, app):
- return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
- for s in tasks]
-
-
@Signature.register_type
class group(Signature):
@@ -477,14 +570,58 @@ def from_dict(self, d, app=None):
task['args'] = task._merge(d['args'])[0]
return group(tasks, app=app, **kwdict(d['options']))
- def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options):
- tasks = _maybe_clone(self.tasks, app=self._app)
- if not tasks:
+ def _prepared(self, tasks, partial_args, group_id, root_id):
+ for task in tasks:
+ task = task.clone(partial_args)
+ yield task, task.freeze(group_id=group_id, root_id=root_id)
+
+ def _apply_tasks(self, tasks, producer=None, app=None, **options):
+ app = app or self.app
+ with app.producer_or_acquire(producer) as producer:
+ for sig, res in tasks:
+ sig.apply_async(producer=producer, add_to_parent=False,
+ **options)
+ yield res
+
+ def _freeze_gid(self, options):
+ # remove task_id and use that as the group_id,
+ # if we don't remove it then every task will have the same id...
+ options = dict(self.options, **options)
+ options['group_id'] = group_id = (
+ options.pop('task_id', uuid()))
+ return options, group_id, options.get('root_id')
+
+ def apply_async(self, args=(), kwargs=None, add_to_parent=True,
+ producer=None, **options):
+ app = self.app
+ if app.conf.CELERY_ALWAYS_EAGER:
+ return self.apply(args, kwargs, **options)
+ if not self.tasks:
return self.freeze()
- type = self.type
+
+ options, group_id, root_id = self._freeze_gid(options)
+ tasks = self._prepared(self.tasks, args, group_id, root_id)
+ result = self.app.GroupResult(
+ group_id, list(self._apply_tasks(tasks, producer, app, **options)),
+ )
+ parent_task = get_current_worker_task()
+ if add_to_parent and parent_task:
+ parent_task.add_trail(result)
+ return result
+
return type(*type.prepare(dict(self.options, **options), tasks, args),
add_to_parent=add_to_parent)
+ def apply(self, args=(), kwargs={}, **options):
+ app = self.app
+ if not self.tasks:
+ return self.freeze() # empty group returns GroupResult
+ options, group_id, root_id = self._freeze_gid(options)
+ tasks = self._prepared(self.tasks, args, group_id, root_id)
+ return app.GroupResult(group_id, [
+ sig.apply(**options) for sig, _ in tasks
+ ])
+
def set_immutable(self, immutable):
for task in self.tasks:
task.set_immutable(immutable)
@@ -498,15 +635,10 @@ def link_error(self, sig):
sig = sig.clone().set(immutable=True)
return self.tasks[0].link_error(sig)
- def apply(self, *args, **kwargs):
- if not self.tasks:
- return self.freeze() # empty group returns GroupResult
- return Signature.apply(self, *args, **kwargs)
-
def __call__(self, *partial_args, **options):
return self.apply_async(partial_args, **options)
- def freeze(self, _id=None, group_id=None, chord=None):
+ def freeze(self, _id=None, group_id=None, chord=None, root_id=None):
opts = self.options
try:
gid = opts['task_id']
@@ -516,10 +648,13 @@ def freeze(self, _id=None, group_id=None, chord=None):
opts['group_id'] = group_id
if chord:
opts['chord'] = group_id
+ root_id = opts.setdefault('root_id', root_id)
new_tasks, results = [], []
for task in self.tasks:
task = maybe_signature(task, app=self._app).clone()
- results.append(task.freeze(group_id=group_id, chord=chord))
+ results.append(task.freeze(
+ group_id=group_id, chord=chord, root_id=root_id,
+ ))
new_tasks.append(task)
self.tasks = self.kwargs['tasks'] = new_tasks
return self.app.GroupResult(gid, results)
@@ -538,14 +673,14 @@ def __repr__(self):
return repr(self.tasks)
@property
- def type(self):
- if self._type:
- return self._type
- # taking the app from the first task in the list, there may be a
- # better solution for this, e.g. to consolidate tasks with the same
- # app and apply them in batches.
- app = self._app if self._app else self.tasks[0].type.app
- return app.tasks[self['task']]
+ def app(self):
+ app = self._app
+ if app is None:
+ try:
+ app = self.tasks[0]._app
+ except (KeyError, IndexError):
+ pass
+ return app if app is not None else current_app
@Signature.register_type
@@ -560,8 +695,8 @@ def __init__(self, header, body=None, task='celery.chord',
)
self.subtask_type = 'chord'
- def freeze(self, _id=None, group_id=None, chord=None):
- return self.body.freeze(_id, group_id=group_id, chord=chord)
+ def freeze(self, *args, **kwargs):
+ return self.body.freeze(*args, **kwargs)
@classmethod
def from_dict(self, d, app=None):
@@ -574,20 +709,14 @@ def _unpack_args(header=None, body=None, **kwargs):
# than manually popping things off.
return (header, body), kwargs
- @property
- def type(self):
- if self._type:
- return self._type
- # we will be able to fix this mess in 3.2 when we no longer
- # require an actual task implementation for chord/group
- if self._app:
- app = self._app
- else:
- try:
- app = self.tasks[0].type.app
- except IndexError:
- app = self.body.type.app
- return app.tasks['celery.chord']
+ @cached_property
+ def app(self):
+ app = self._app
+ if app is None:
+ app = self.tasks[0]._app
+ if app is None:
+ app = self.body._app
+ return app if app is not None else current_app
def apply_async(self, args=(), kwargs={}, task_id=None,
producer=None, publisher=None, connection=None,
@@ -595,14 +724,41 @@ def apply_async(self, args=(), kwargs={}, task_id=None,
body = kwargs.get('body') or self.kwargs['body']
kwargs = dict(self.kwargs, **kwargs)
body = body.clone(**options)
+ app = self.app
+ tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+ else group(self.tasks))
+ if app.conf.CELERY_ALWAYS_EAGER:
+ return self.apply((), kwargs,
+ body=body, task_id=task_id, **options)
+ return self.run(tasks, body, args, task_id=task_id, **options)
+
+ def apply(self, args=(), kwargs={}, propagate=True, body=None, **options):
+ body = self.body if body is None else body
+ tasks = (self.tasks.clone() if isinstance(self.tasks, group)
+ else group(self.tasks))
+ return body.apply(
+ args=(tasks.apply().get(propagate=propagate), ),
+ )
- _chord = self.type
- if _chord.app.conf.CELERY_ALWAYS_EAGER:
- return self.apply((), kwargs, task_id=task_id, **options)
- res = body.freeze(task_id)
- parent = _chord(self.tasks, body, args, **options)
- res.parent = parent
- return res
+ def run(self, header, body, partial_args, app=None, interval=None,
+ countdown=1, max_retries=None, propagate=None, eager=False,
+ task_id=None, **options):
+ app = app or self.app
+ propagate = (app.conf.CELERY_CHORD_PROPAGATES
+ if propagate is None else propagate)
+ group_id = uuid()
+ root_id = body.options.get('root_id')
+ body.setdefault('chord_size', len(header.tasks))
+ results = header.freeze(
+ group_id=group_id, chord=body, root_id=root_id).results
+ bodyres = body.freeze(task_id, root_id=root_id)
+
+ parent = app.backend.apply_chord(
+ header, partial_args, group_id, body,
+ interval=interval, countdown=countdown,
+ max_retries=max_retries, propagate=propagate, result=results)
+ bodyres.parent = parent
+ return bodyres
def __call__(self, body=None, **options):
return self.apply_async((), {'body': body} if body else {}, **options)
diff --git a/celery/tests/app/test_builtins.py b/celery/tests/app/test_builtins.py
index 9b00c1a25..305877f47 100644
--- a/celery/tests/app/test_builtins.py
+++ b/celery/tests/app/test_builtins.py
@@ -136,18 +136,18 @@ def test_apply_async(self):
def test_group_to_chord(self):
c = (
- group(self.add.s(i, i) for i in range(5)) |
+ group([self.add.s(i, i) for i in range(5)], app=self.app) |
self.add.s(10) |
self.add.s(20) |
self.add.s(30)
)
- tasks, _ = c.type.prepare_steps((), c.tasks)
+ tasks, _ = c.prepare_steps((), c.tasks)
self.assertIsInstance(tasks[0], chord)
self.assertTrue(tasks[0].body.options['link'])
self.assertTrue(tasks[0].body.options['link'][0].options['link'])
c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10))
- tasks2, _ = c2.type.prepare_steps((), c2.tasks)
+ tasks2, _ = c2.prepare_steps((), c2.tasks)
self.assertIsInstance(tasks2[1], group)
def test_apply_options(self):
@@ -158,7 +158,7 @@ def clone(self, *args, **kwargs):
return self
def s(*args, **kwargs):
- return static(self.add, args, kwargs, type=self.add)
+ return static(self.add, args, kwargs, type=self.add, app=self.app)
c = s(2, 2) | s(4, 4) | s(8, 8)
r1 = c.apply_async(task_id='some_id')
@@ -196,18 +196,16 @@ def test_run_header_not_group(self):
def test_forward_options(self):
body = self.xsum.s()
x = chord([self.add.s(i, i) for i in range(10)], body=body)
- x._type = Mock()
- x._type.app.conf.CELERY_ALWAYS_EAGER = False
+ x.run = Mock(name='chord.run(x)')
x.apply_async(group_id='some_group_id')
- self.assertTrue(x._type.called)
- resbody = x._type.call_args[0][1]
+ self.assertTrue(x.run.called)
+ resbody = x.run.call_args[0][1]
self.assertEqual(resbody.options['group_id'], 'some_group_id')
x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
- x2._type = Mock()
- x2._type.app.conf.CELERY_ALWAYS_EAGER = False
+ x2.run = Mock(name='chord.run(x2)')
x2.apply_async(chord='some_chord_id')
- self.assertTrue(x2._type.called)
- resbody = x2._type.call_args[0][1]
+ self.assertTrue(x2.run.called)
+ resbody = x2.run.call_args[0][1]
self.assertEqual(resbody.options['chord'], 'some_chord_id')
def test_apply_eager(self):
diff --git a/celery/tests/tasks/test_chord.py b/celery/tests/tasks/test_chord.py
index 531d06467..27424a30a 100644
--- a/celery/tests/tasks/test_chord.py
+++ b/celery/tests/tasks/test_chord.py
@@ -205,7 +205,7 @@ def test_apply(self):
m = Mock()
m.app.conf.CELERY_ALWAYS_EAGER = False
m.AsyncResult = AsyncResult
- prev, chord._type = chord._type, m
+ prev, chord.run = chord.run, m
try:
x = chord(self.add.s(i, i) for i in range(10))
body = self.add.s(2)
@@ -214,9 +214,9 @@ def test_apply(self):
# does not modify original signature
with self.assertRaises(KeyError):
body.options['task_id']
- self.assertTrue(chord._type.called)
+ self.assertTrue(chord.run.called)
finally:
- chord._type = prev
+ chord.run = prev
class test_Chord_task(ChordCase):
@@ -227,7 +227,7 @@ def test_run(self):
self.app.backend.cleanup.__name__ = 'cleanup'
Chord = self.app.tasks['celery.chord']
- body = dict()
+ body = self.add.signature()
Chord(group(self.add.signature((i, i)) for i in range(5)), body)
Chord([self.add.signature((j, j)) for j in range(5)], body)
self.assertEqual(self.app.backend.apply_chord.call_count, 2)
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
index e0bb1ff89..d36936c2e 100644
--- a/docs/internals/protov2.rst
+++ b/docs/internals/protov2.rst
@@ -50,6 +50,7 @@ Notes
- ``correlation_id`` replaces ``task_id`` field.
+- ``root_id`` and ``parent_id`` fields help keep track of workflows.
- ``c_shadow`` lets you specify a different name for logs, monitors
can be used for e.g. meta tasks that calls any function::
@@ -115,6 +116,8 @@ Definition
'chord': (uuid)chord_id,
'retries': (int)retries,
'timelimit': (tuple)(soft, hard),
+ 'root_id': (uuid)root_id,
+ 'parent_id': (uuid)parent_id,
}
body = (args, kwargs)
From f786b85c14a5df1bf3719422c85739f77690d750 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 8 May 2014 16:50:17 +0100
Subject: [PATCH 0107/1103] docstrings
---
celery/app/builtins.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index f08bf5054..3e5f111c5 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -137,6 +137,7 @@ def chunks(task, it, n):
@connect_on_app_finalize
def add_group_task(app):
+ """No longer used, but here for backwards compatibility."""
_app = app
from celery.canvas import maybe_signature
from celery.result import result_from_tuple
@@ -166,6 +167,7 @@ def run(self, tasks, result, group_id, partial_args,
@connect_on_app_finalize
def add_chain_task(app):
+ """No longer used, but here for backwards compatibility."""
_app = app
class Chain(app.Task):
@@ -179,9 +181,7 @@ class Chain(app.Task):
@connect_on_app_finalize
def add_chord_task(app):
- """Every chord is executed in a dedicated task, so that the chord
- can be used as a signature, and this generates the task
- responsible for that."""
+ """No longer used, but here for backwards compatibility."""
from celery import group, chord as _chord
from celery.canvas import maybe_signature
_app = app
From 1e9dd26592eb2b93f1cb16deb771cfc65ab79612 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 8 May 2014 19:13:42 +0100
Subject: [PATCH 0108/1103] Super refactor, merging everything for 3.2
---
celery/app/amqp.py | 1 +
celery/app/builtins.py | 1 -
celery/app/task.py | 13 +-
celery/app/trace.py | 160 +++++++++++++--
celery/concurrency/asynpool.py | 9 -
celery/concurrency/base.py | 3 +-
celery/tests/tasks/test_trace.py | 8 +-
celery/worker/consumer.py | 25 ++-
celery/worker/control.py | 4 +-
celery/worker/job.py | 296 ++++++++--------------------
celery/worker/loops.py | 4 +-
celery/worker/strategy.py | 24 ++-
docs/internals/protov2.rst | 5 +-
funtests/stress/stress/templates.py | 1 +
14 files changed, 280 insertions(+), 274 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index e3f62b731..b70532cef 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -297,6 +297,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
headers={
'lang': 'py',
'c_type': name,
+ 'task_id': task_id,
'eta': eta,
'expires': expires,
'callbacks': callbacks,
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index 3e5f111c5..81d5f074c 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -64,7 +64,6 @@ def unlock_chord(group_id, callback, interval=None, propagate=None,
# check if the task group is ready, and if so apply the callback.
callback = maybe_signature(callback, app)
- root_id = callback.options.get('root_id')
deps = GroupResult(
group_id,
[result_from_tuple(r, app=app) for r in result],
diff --git a/celery/app/task.py b/celery/app/task.py
index 4687f29d2..705c26269 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -695,7 +695,7 @@ def apply(self, args=None, kwargs=None,
"""
# trace imports Task, so need to import inline.
- from celery.app.trace import eager_trace_task
+ from celery.app.trace import build_tracer
app = self._get_app()
args = args or ()
@@ -736,12 +736,15 @@ def apply(self, args=None, kwargs=None,
kwargs.update(extend_with)
tb = None
- retval, info = eager_trace_task(task, task_id, args, kwargs,
- app=self._get_app(),
- request=request, propagate=throw)
+ tracer = build_tracer(
+ task.name, task, eager=True,
+ propagate=throw, app=self._get_app(),
+ )
+ ret = tracer(task_id, args, kwargs, request)
+ retval = ret.retval
if isinstance(retval, ExceptionInfo):
retval, tb = retval.exception, retval.traceback
- state = states.SUCCESS if info is None else info.state
+ state = states.SUCCESS if ret.info is None else ret.info.state
return EagerResult(task_id, retval, state, traceback=tb)
def AsyncResult(self, task_id, **kwargs):
diff --git a/celery/app/trace.py b/celery/app/trace.py
index 45e24c170..03e07423e 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -15,33 +15,68 @@
# but in the end it only resulted in bad performance and horrible tracebacks,
# so instead we now use one closure per task class.
+import logging
import os
import socket
import sys
+from collections import namedtuple
from warnings import warn
from billiard.einfo import ExceptionInfo
from kombu.exceptions import EncodeError
-from kombu.utils import kwdict
+from kombu.serialization import decode as decode_message
+from kombu.utils.encoding import safe_repr, safe_str
from celery import current_app, group
from celery import states, signals
from celery._state import _task_stack
from celery.app import set_default_app
from celery.app.task import Task as BaseTask, Context
-from celery.exceptions import Ignore, Reject, Retry
+from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
+from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.objects import mro_lookup
from celery.utils.serialization import (
- get_pickleable_exception,
- get_pickleable_etype,
+ get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
)
+from celery.utils.text import truncate
-__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
+__all__ = ['TraceInfo', 'build_tracer', 'trace_task',
'setup_worker_optimizations', 'reset_worker_optimizations']
-_logger = get_logger(__name__)
+logger = get_logger(__name__)
+info = logger.info
+
+#: Format string used to log task success.
+LOG_SUCCESS = """\
+Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
+"""
+
+#: Format string used to log task failure.
+LOG_FAILURE = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task internal error.
+LOG_INTERNAL_ERROR = """\
+Task %(name)s[%(id)s] %(description)s: %(exc)s\
+"""
+
+#: Format string used to log task ignored.
+LOG_IGNORED = """\
+Task %(name)s[%(id)s] %(description)s\
+"""
+
+#: Format string used to log task rejected.
+LOG_REJECTED = """\
+Task %(name)s[%(id)s] %(exc)s\
+"""
+
+#: Format string used to log task retry.
+LOG_RETRY = """\
+Task %(name)s[%(id)s] retry: %(exc)s\
+"""
send_prerun = signals.task_prerun.send
send_postrun = signals.task_postrun.send
@@ -59,6 +94,8 @@
_tasks = None
_patched = {}
+trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
+
def task_has_custom(task, attr):
"""Return true if the task or one of its bases
@@ -100,6 +137,10 @@ def handle_retry(self, task, store_errors=True):
task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
signals.task_retry.send(sender=task, request=req,
reason=reason, einfo=einfo)
+ info(LOG_RETRY, {
+ 'id': req.id, 'name': task.name,
+ 'exc': safe_repr(reason.exc),
+ })
return einfo
finally:
del(tb)
@@ -123,14 +164,71 @@ def handle_failure(self, task, store_errors=True):
kwargs=req.kwargs,
traceback=tb,
einfo=einfo)
+ self._log_error(task, einfo)
return einfo
finally:
del(tb)
+ def _log_error(self, task, einfo):
+ req = task.request
+ eobj = einfo.exception = get_pickled_exception(einfo.exception)
+ exception, traceback, exc_info, internal, sargs, skwargs = (
+ safe_repr(eobj),
+ safe_str(einfo.traceback),
+ einfo.exc_info,
+ einfo.internal,
+ safe_repr(req.args),
+ safe_repr(req.kwargs),
+ )
+ if task.throws and isinstance(eobj, task.throws):
+ do_send_mail, severity, exc_info, description = (
+ False, logging.INFO, None, 'raised expected',
+ )
+ else:
+ do_send_mail, severity, description = (
+ True, logging.ERROR, 'raised unexpected',
+ )
+ format = LOG_FAILURE
+
+ if internal:
+ if isinstance(einfo.exception, Reject):
+ format = LOG_REJECTED
+ description = 'rejected'
+ severity = logging.WARN
+ exc_info = einfo
+ elif isinstance(einfo.exception, Ignore):
+ format = LOG_IGNORED
+ description = 'ignored'
+ severity = logging.INFO
+ exc_info = None
+ else:
+ format = LOG_INTERNAL_ERROR
+ description = 'INTERNAL ERROR'
+ severity = logging.CRITICAL
+
+ context = {
+ 'hostname': req.hostname,
+ 'id': req.id,
+ 'name': task.name,
+ 'exc': exception,
+ 'traceback': traceback,
+ 'args': sargs,
+ 'kwargs': skwargs,
+ 'description': description,
+ 'internal': internal,
+ }
+
+ logger.log(severity, format.strip(), context,
+ exc_info=exc_info,
+ extra={'data': context})
+
+ task.send_error_email(context, einfo.exception)
+
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
Info=TraceInfo, eager=False, propagate=False, app=None,
- IGNORE_STATES=IGNORE_STATES):
+ monotonic=monotonic, truncate=truncate,
+ trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
"""Return a function that traces task execution; catches all
exceptions and updates result backend with the state and result
@@ -186,6 +284,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
push_task = _task_stack.push
pop_task = _task_stack.pop
on_chord_part_return = backend.on_chord_part_return
+ _does_info = logger.isEnabledFor(logging.INFO)
prerun_receivers = signals.task_prerun.receivers
postrun_receivers = signals.task_postrun.receivers
@@ -209,6 +308,8 @@ def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True):
def trace_task(uuid, args, kwargs, request=None):
# R - is the possibly prepared return value.
# I - is the Info object.
+ # T - runtime
+ # Rstr - textual representation of return value
# retval - is the always unmodified return value.
# state - is the resulting task state.
@@ -216,9 +317,14 @@ def trace_task(uuid, args, kwargs, request=None):
# for performance reasons, and because the function is so long
# we want the main variables (I, and R) to stand out visually from the
# the rest of the variables, so breaking PEP8 is worth it ;)
- R = I = retval = state = None
- kwargs = kwdict(kwargs)
+ R = I = T = Rstr = retval = state = None
+ time_start = monotonic()
try:
+ try:
+ kwargs.items
+ except AttributeError:
+ raise InvalidTaskError(
+ 'Task keyword arguments is not a mapping')
push_task(task)
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
@@ -289,6 +395,13 @@ def trace_task(uuid, args, kwargs, request=None):
task_on_success(retval, uuid, args, kwargs)
if success_receivers:
send_success(sender=task, result=retval)
+ if _does_info:
+ T = monotonic() - time_start
+ Rstr = truncate(safe_repr(R), 256)
+ info(LOG_SUCCESS, {
+ 'id': uuid, 'name': name,
+ 'return_value': Rstr, 'runtime': T,
+ })
# -* POST *-
if state not in IGNORE_STATES:
@@ -314,15 +427,15 @@ def trace_task(uuid, args, kwargs, request=None):
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as exc:
- _logger.error('Process cleanup failed: %r', exc,
- exc_info=True)
+ logger.error('Process cleanup failed: %r', exc,
+ exc_info=True)
except MemoryError:
raise
except Exception as exc:
if eager:
raise
R = report_internal_error(task, exc)
- return R, I
+ return trace_ok_t(R, I, T, Rstr)
return trace_task
@@ -342,16 +455,23 @@ def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
trace_task_ret = _trace_task_ret
-def _fast_trace_task(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}):
# setup_worker_optimizations will point trace_task_ret to here,
# so this is the function used in the worker.
- return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
-
-
-def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
- opts.setdefault('eager', True)
- return build_tracer(task.name, task, **opts)(
- uuid, args, kwargs, request)
+ R, I, T, Rstr = _tasks[task].__trace__(uuid, args, kwargs, request)[0]
+ # exception instance if error, else result text
+ return (R if I else Rstr), T
+
+
+def _fast_trace_task(task, uuid, request, body, content_type,
+ content_encoding, decode_message=decode_message,
+ **extra_request):
+ args, kwargs = decode_message(body, content_type, content_encoding)
+ request.update(args=args, kwargs=kwargs, **extra_request)
+ R, I, T, Rstr = _tasks[task].__trace__(
+ uuid, args, kwargs, request,
+ )
+ return (R if I else Rstr), T
def report_internal_error(task, exc):
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index a3906c492..c2dbb0241 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -37,7 +37,6 @@
from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined
from billiard import pool as _pool
from billiard.compat import buf_t, setblocking, isblocking
-from billiard.einfo import ExceptionInfo
from billiard.queues import _SimpleQueue
from kombu.async import READ, WRITE, ERR
from kombu.serialization import pickle as _pickle
@@ -46,7 +45,6 @@
from kombu.utils.eventio import SELECT_BAD_FD
from celery.five import Counter, items, values
from celery.utils.log import get_logger
-from celery.utils.text import truncate
from celery.worker import state as worker_state
try:
@@ -96,8 +94,6 @@ def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa
'fair': SCHED_STRATEGY_FAIR,
}
-RESULT_MAXLEN = 128
-
Ack = namedtuple('Ack', ('id', 'fd', 'payload'))
@@ -170,11 +166,6 @@ def on_loop_start(self, pid):
# is writable.
self.outq.put((WORKER_UP, (pid, )))
- def prepare_result(self, result, RESULT_MAXLEN=RESULT_MAXLEN):
- if not isinstance(result, ExceptionInfo):
- return truncate(repr(result), RESULT_MAXLEN)
- return result
-
class ResultHandler(_pool.ResultHandler):
"""Handles messages from the pool processes."""
diff --git a/celery/concurrency/base.py b/celery/concurrency/base.py
index 29c348d6a..24b39a1ee 100644
--- a/celery/concurrency/base.py
+++ b/celery/concurrency/base.py
@@ -66,6 +66,7 @@ class BasePool(object):
_state = None
_pool = None
+ _does_debug = True
#: only used by multiprocessing pool
uses_semaphore = False
@@ -79,7 +80,6 @@ def __init__(self, limit=None, putlocks=True,
self.options = options
self.forking_enable = forking_enable
self.callbacks_propagate = callbacks_propagate
- self._does_debug = logger.isEnabledFor(logging.DEBUG)
def on_start(self):
pass
@@ -128,6 +128,7 @@ def terminate(self):
self.on_terminate()
def start(self):
+ self._does_debug = logger.isEnabledFor(logging.DEBUG)
self.on_start()
self._state = self.RUN
diff --git a/celery/tests/tasks/test_trace.py b/celery/tests/tasks/test_trace.py
index 12c6280ef..0a7ec3345 100644
--- a/celery/tests/tasks/test_trace.py
+++ b/celery/tests/tasks/test_trace.py
@@ -6,7 +6,7 @@
from celery.exceptions import Ignore, Retry
from celery.app.trace import (
TraceInfo,
- eager_trace_task,
+ build_tracer,
trace_task,
setup_worker_optimizations,
reset_worker_optimizations,
@@ -15,8 +15,10 @@
def trace(app, task, args=(), kwargs={}, propagate=False, **opts):
- return eager_trace_task(task, 'id-1', args, kwargs,
- propagate=propagate, app=app, **opts)
+ t = build_tracer(task.name, task,
+ eager=True, propagate=propagate, app=app, **opts)
+ ret = t('id-1', args, kwargs, None)
+ return ret.retval, ret.info
class TraceCase(AppCase):
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index c761d043a..33facabd1 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -127,6 +127,8 @@ class buffer_t(object): # noqa
def dump_body(m, body):
+ # v2 protocol does not deserialize body
+ body = m.body if body is None else body
if isinstance(body, buffer_t):
body = bytes_t(body)
return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024),
@@ -445,7 +447,7 @@ def create_task_handler(self):
on_invalid_task = self.on_invalid_task
callbacks = self.on_task_message
- def on_task_received(body, message):
+ def on_v1_task_received(body, message):
try:
name = body['task']
except (KeyError, TypeError):
@@ -461,6 +463,22 @@ def on_task_received(body, message):
except InvalidTaskError as exc:
on_invalid_task(body, message, exc)
+ def on_task_received(message):
+ headers = message.headers
+ try:
+ type_ = headers['c_type']
+ except KeyError:
+ return on_v1_task_received(message.payload, message)
+ try:
+ strategies[type_](
+ message, None,
+ message.ack_log_error, message.reject_log_error, callbacks,
+ )
+ except KeyError as exc:
+ on_unknown_task(None, message, exc)
+ except InvalidTaskError as exc:
+ on_invalid_task(None, message, exc)
+
return on_task_received
def __repr__(self):
@@ -541,8 +559,9 @@ def __init__(self, c, without_heartbeat=False, heartbeat_interval=None,
c.heart = None
def start(self, c):
- c.heart = heartbeat.Heart(c.timer, c.event_dispatcher,
- self.heartbeat_interval)
+ c.heart = heartbeat.Heart(
+ c.timer, c.event_dispatcher, self.heartbeat_interval,
+ )
c.heart.start()
def stop(self, c):
diff --git a/celery/worker/control.py b/celery/worker/control.py
index 6016543c7..2067d4043 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -364,7 +364,9 @@ def active_queues(state):
def _wanted_config_key(key):
- return isinstance(key, string_t) and key.isupper() and not key.startswith('__')
+ return (isinstance(key, string_t) and
+ key.isupper() and
+ not key.startswith('__'))
@Panel.register
diff --git a/celery/worker/job.py b/celery/worker/job.py
index 8522d0091..74278cc1e 100644
--- a/celery/worker/job.py
+++ b/celery/worker/job.py
@@ -17,7 +17,6 @@
from datetime import datetime
from weakref import ref
-from kombu.utils import kwdict, reprcall
from kombu.utils.encoding import safe_repr, safe_str
from celery import signals
@@ -27,14 +26,12 @@
SoftTimeLimitExceeded, TimeLimitExceeded,
WorkerLostError, Terminated, Retry, Reject,
)
-from celery.five import items, monotonic, string, string_t
+from celery.five import string
from celery.platforms import signals as _signals
-from celery.utils import fun_takes_kwargs
from celery.utils.functional import noop
from celery.utils.log import get_logger
-from celery.utils.serialization import get_pickled_exception
-from celery.utils.text import truncate
from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware
+from celery.utils.serialization import get_pickled_exception
from . import state
@@ -69,8 +66,6 @@ def __optimize__():
task_ready = state.task_ready
revoked_tasks = state.revoked
-NEEDS_KWDICT = sys.version_info <= (2, 6)
-
#: Use when no message object passed to :class:`Request`.
DEFAULT_FIELDS = {
'headers': None,
@@ -85,63 +80,46 @@ def __optimize__():
}
+class RequestV1(object):
+ if not IS_PYPY:
+ __slots__ = (
+ 'app', 'name', 'id', 'root_id', 'parent_id',
+ 'on_ack', 'hostname', 'eventer', 'connection_errors', 'task',
+ 'eta', 'expires', 'request_dict', 'acknowledged', 'on_reject',
+ 'utc', 'time_start', 'worker_pid', '_already_revoked',
+ '_terminate_on_ack', '_apply_result',
+ '_tzlocal', '__weakref__', '__dict__',
+ )
+
+
class Request(object):
"""A request for task execution."""
if not IS_PYPY: # pragma: no cover
__slots__ = (
- 'app', 'name', 'id', 'args', 'kwargs', 'on_ack',
+ 'app', 'name', 'id', 'on_ack', 'payload',
'hostname', 'eventer', 'connection_errors', 'task', 'eta',
'expires', 'request_dict', 'acknowledged', 'on_reject',
- 'utc', 'time_start', 'worker_pid', '_already_revoked',
- '_terminate_on_ack', '_apply_result',
+ 'utc', 'time_start', 'worker_pid', 'timeouts',
+ 'content_type', 'content_encoding',
+ '_already_revoked', '_terminate_on_ack', '_apply_result',
'_tzlocal', '__weakref__', '__dict__',
)
- #: Format string used to log task success.
- success_msg = """\
- Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s
- """
-
- #: Format string used to log task failure.
- error_msg = """\
- Task %(name)s[%(id)s] %(description)s: %(exc)s
- """
-
- #: Format string used to log internal error.
- internal_error_msg = """\
- Task %(name)s[%(id)s] %(description)s: %(exc)s
- """
-
- ignored_msg = """\
- Task %(name)s[%(id)s] %(description)s
- """
-
- rejected_msg = """\
- Task %(name)s[%(id)s] %(exc)s
- """
-
- #: Format string used to log task retry.
- retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s"""
-
- def __init__(self, body, on_ack=noop,
+ def __init__(self, message, on_ack=noop,
hostname=None, eventer=None, app=None,
connection_errors=None, request_dict=None,
- message=None, task=None, on_reject=noop, **opts):
+ task=None, on_reject=noop, **opts):
+ headers = message.headers
self.app = app
- name = self.name = body['task']
- self.id = body['id']
- self.args = body.get('args', [])
- self.kwargs = body.get('kwargs', {})
- try:
- self.kwargs.items
- except AttributeError:
- raise InvalidTaskError(
- 'Task keyword arguments is not a mapping')
- if NEEDS_KWDICT:
- self.kwargs = kwdict(self.kwargs)
- eta = body.get('eta')
- expires = body.get('expires')
- utc = self.utc = body.get('utc', False)
+ name = self.name = headers['c_type']
+ self.id = headers['task_id']
+ self.payload = message.body
+ self.content_type = message.content_type
+ self.content_encoding = message.content_encoding
+ eta = headers.get('eta')
+ expires = headers.get('expires')
+ self.timeouts = (headers['timeouts'] if 'timeouts' in headers
+ else (None, None))
self.on_ack = on_ack
self.on_reject = on_reject
self.hostname = hostname or socket.gethostname()
@@ -157,75 +135,42 @@ def __init__(self, body, on_ack=noop,
# supported at this point is UTC.
if eta is not None:
try:
- self.eta = maybe_iso8601(eta)
+ eta = maybe_iso8601(eta)
except (AttributeError, ValueError, TypeError) as exc:
raise InvalidTaskError(
'invalid eta value {0!r}: {1}'.format(eta, exc))
- if utc:
- self.eta = maybe_make_aware(self.eta, self.tzlocal)
+ self.eta = maybe_make_aware(eta, self.tzlocal)
else:
self.eta = None
if expires is not None:
try:
- self.expires = maybe_iso8601(expires)
+ expires = maybe_iso8601(expires)
except (AttributeError, ValueError, TypeError) as exc:
raise InvalidTaskError(
'invalid expires value {0!r}: {1}'.format(expires, exc))
- if utc:
- self.expires = maybe_make_aware(self.expires, self.tzlocal)
+ self.expires = maybe_make_aware(expires, self.tzlocal)
else:
self.expires = None
- if message:
- delivery_info = message.delivery_info or {}
- properties = message.properties or {}
- body.update({
- 'headers': message.headers,
- 'reply_to': properties.get('reply_to'),
- 'correlation_id': properties.get('correlation_id'),
- 'delivery_info': {
- 'exchange': delivery_info.get('exchange'),
- 'routing_key': delivery_info.get('routing_key'),
- 'priority': delivery_info.get('priority'),
- 'redelivered': delivery_info.get('redelivered'),
- }
-
- })
- else:
- body.update(DEFAULT_FIELDS)
- self.request_dict = body
+ delivery_info = message.delivery_info or {}
+ properties = message.properties or {}
+ headers.update({
+ 'reply_to': properties.get('reply_to'),
+ 'correlation_id': properties.get('correlation_id'),
+ 'delivery_info': {
+ 'exchange': delivery_info.get('exchange'),
+ 'routing_key': delivery_info.get('routing_key'),
+ 'priority': delivery_info.get('priority'),
+ 'redelivered': delivery_info.get('redelivered'),
+ }
+
+ })
+ self.request_dict = headers
@property
def delivery_info(self):
return self.request_dict['delivery_info']
- def extend_with_default_kwargs(self):
- """Extend the tasks keyword arguments with standard task arguments.
-
- Currently these are `logfile`, `loglevel`, `task_id`,
- `task_name`, `task_retries`, and `delivery_info`.
-
- See :meth:`celery.task.base.Task.run` for more information.
-
- Magic keyword arguments are deprecated and will be removed
- in version 4.0.
-
- """
- kwargs = dict(self.kwargs)
- default_kwargs = {'logfile': None, # deprecated
- 'loglevel': None, # deprecated
- 'task_id': self.id,
- 'task_name': self.name,
- 'task_retries': self.request_dict.get('retries', 0),
- 'task_is_eager': False,
- 'delivery_info': self.delivery_info}
- fun = self.task.run
- supported_keys = fun_takes_kwargs(fun, default_kwargs)
- extend_with = {key: val for key, val in items(default_kwargs)
- if key in supported_keys}
- kwargs.update(extend_with)
- return kwargs
-
def execute_using_pool(self, pool, **kwargs):
"""Used by the worker to send this task to the pool.
@@ -235,32 +180,28 @@ def execute_using_pool(self, pool, **kwargs):
and ignored.
"""
- uuid = self.id
+ task_id = self.id
task = self.task
if self.revoked():
- raise TaskRevokedError(uuid)
+ raise TaskRevokedError(task_id)
- hostname = self.hostname
- kwargs = self.kwargs
- if task.accept_magic_kwargs:
- kwargs = self.extend_with_default_kwargs()
- request = self.request_dict
- request.update({'hostname': hostname, 'is_eager': False,
- 'delivery_info': self.delivery_info,
- 'group': self.request_dict.get('taskset')})
- timeout, soft_timeout = request.get('timelimit', (None, None))
+ payload = self.payload
+ timeout, soft_timeout = self.timeouts
timeout = timeout or task.time_limit
soft_timeout = soft_timeout or task.soft_time_limit
result = pool.apply_async(
trace_task_ret,
- args=(self.name, uuid, self.args, kwargs, request),
+ args=(self.name, task_id, self.request_dict,
+ bytes(payload) if isinstance(payload, buffer) else payload,
+ self.content_type, self.content_encoding),
+ kwargs={'hostname': self.hostname, 'is_eager': False},
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
error_callback=self.on_failure,
- soft_timeout=soft_timeout,
- timeout=timeout,
- correlation_id=uuid,
+ soft_timeout=soft_timeout or task.soft_time_limit,
+ timeout=timeout or task.time_limit,
+ correlation_id=task_id,
)
# cannot create weakref to None
self._apply_result = ref(result) if result is not None else result
@@ -281,8 +222,6 @@ def execute(self, loglevel=None, logfile=None):
self.acknowledge()
kwargs = self.kwargs
- if self.task.accept_magic_kwargs:
- kwargs = self.extend_with_default_kwargs()
request = self.request_dict
request.update({'loglevel': loglevel, 'logfile': logfile,
'hostname': self.hostname, 'is_eager': False,
@@ -374,7 +313,7 @@ def on_timeout(self, soft, timeout):
if self.task.acks_late:
self.acknowledge()
- def on_success(self, ret_value, now=None, nowfun=monotonic):
+ def on_success(self, ret_value, **kwargs):
"""Handler called if the task was successfully processed."""
if isinstance(ret_value, ExceptionInfo):
if isinstance(ret_value.exception, (
@@ -387,18 +326,10 @@ def on_success(self, ret_value, now=None, nowfun=monotonic):
self.acknowledge()
if self.eventer and self.eventer.enabled:
- now = nowfun()
- runtime = self.time_start and (now - self.time_start) or 0
- self.send_event('task-succeeded',
- result=safe_repr(ret_value), runtime=runtime)
-
- if _does_info:
- now = now or nowfun()
- runtime = self.time_start and (now - self.time_start) or 0
- info(self.success_msg.strip(), {
- 'id': self.id, 'name': self.name,
- 'return_value': self.repr_result(ret_value),
- 'runtime': runtime})
+ result, runtime = ret_value
+ self.send_event(
+ 'task-succeeded', result=ret_value, runtime=runtime,
+ )
def on_retry(self, exc_info):
"""Handler called if the task should be retried."""
@@ -409,17 +340,19 @@ def on_retry(self, exc_info):
exception=safe_repr(exc_info.exception.exc),
traceback=safe_str(exc_info.traceback))
- if _does_info:
- info(self.retry_msg.strip(),
- {'id': self.id, 'name': self.name,
- 'exc': exc_info.exception})
-
def on_failure(self, exc_info):
"""Handler called if the task raised an exception."""
task_ready(self)
send_failed_event = True
- if not exc_info.internal:
+ if exc_info.internal:
+ if isinstance(exc_info.exception, MemoryError):
+ raise MemoryError('Process got: %s' % (exc_info.exception, ))
+ elif isinstance(exc_info.exception, Reject):
+ self.reject(requeue=exc_info.exception.requeue)
+ elif isinstance(exc_info.exception, Ignore):
+ self.acknowledge()
+ else:
exc = exc_info.exception
if isinstance(exc, Retry):
@@ -439,77 +372,14 @@ def on_failure(self, exc_info):
# (acks_late) acknowledge after result stored.
if self.task.acks_late:
self.acknowledge()
- self._log_error(exc_info, send_failed_event=send_failed_event)
-
- def _log_error(self, einfo, send_failed_event=True):
- einfo.exception = get_pickled_exception(einfo.exception)
- eobj = einfo.exception
- exception, traceback, exc_info, internal, sargs, skwargs = (
- safe_repr(eobj),
- safe_str(einfo.traceback),
- einfo.exc_info,
- einfo.internal,
- safe_repr(self.args),
- safe_repr(self.kwargs),
- )
- task = self.task
- if task.throws and isinstance(eobj, task.throws):
- do_send_mail, severity, exc_info, description = (
- False, logging.INFO, None, 'raised expected',
- )
- else:
- do_send_mail, severity, description = (
- True, logging.ERROR, 'raised unexpected',
- )
- format = self.error_msg
+
if send_failed_event:
self.send_event(
- 'task-failed', exception=exception, traceback=traceback,
+ 'task-failed',
+ exception=safe_repr(get_pickled_exception(exc_info.exception)),
+ traceback=exc_info.traceback,
)
- if internal:
- if isinstance(einfo.exception, MemoryError):
- raise MemoryError('Process got: %s' % (einfo.exception, ))
- elif isinstance(einfo.exception, Reject):
- format = self.rejected_msg
- description = 'rejected'
- severity = logging.WARN
- exc_info = einfo
- self.reject(requeue=einfo.exception.requeue)
- elif isinstance(einfo.exception, Ignore):
- format = self.ignored_msg
- description = 'ignored'
- severity = logging.INFO
- exc_info = None
- self.acknowledge()
- else:
- format = self.internal_error_msg
- description = 'INTERNAL ERROR'
- severity = logging.CRITICAL
-
- context = {
- 'hostname': self.hostname,
- 'id': self.id,
- 'name': self.name,
- 'exc': exception,
- 'traceback': traceback,
- 'args': sargs,
- 'kwargs': skwargs,
- 'description': description,
- }
-
- logger.log(severity, format.strip(), context,
- exc_info=exc_info,
- extra={'data': {'id': self.id,
- 'name': self.name,
- 'args': sargs,
- 'kwargs': skwargs,
- 'hostname': self.hostname,
- 'internal': internal}})
-
- if do_send_mail:
- task.send_error_email(context, einfo.exception)
-
def acknowledge(self):
"""Acknowledge task."""
if not self.acknowledged:
@@ -521,18 +391,10 @@ def reject(self, requeue=False):
self.on_reject(logger, self.connection_errors, requeue)
self.acknowledged = True
- def repr_result(self, result, maxlen=RESULT_MAXLEN):
- # 46 is the length needed to fit
- # 'the quick brown fox jumps over the lazy dog' :)
- if not isinstance(result, string_t):
- result = safe_repr(result)
- return truncate(result) if len(result) > maxlen else result
-
def info(self, safe=False):
return {'id': self.id,
'name': self.name,
- 'args': self.args if safe else safe_repr(self.args),
- 'kwargs': self.kwargs if safe else safe_repr(self.kwargs),
+ 'body': self.body,
'hostname': self.hostname,
'time_start': self.time_start,
'acknowledged': self.acknowledged,
@@ -546,9 +408,7 @@ def __str__(self):
shortinfo = __str__
def __repr__(self):
- return '<{0} {1}: {2}>'.format(
- type(self).__name__, self.id,
- reprcall(self.name, self.args, self.kwargs))
+ return '<{0} {1}: {2}>'.format(type(self).__name__, self.id, self.name)
@property
def tzlocal(self):
diff --git a/celery/worker/loops.py b/celery/worker/loops.py
index 0891f51a6..406633e00 100644
--- a/celery/worker/loops.py
+++ b/celery/worker/loops.py
@@ -37,7 +37,7 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
if heartbeat and connection.supports_heartbeats:
hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate)
- consumer.callbacks = [on_task_received]
+ consumer.on_message = on_task_received
consumer.consume()
obj.on_ready()
obj.controller.register_with_event_loop(hub)
@@ -86,7 +86,7 @@ def synloop(obj, connection, consumer, blueprint, hub, qos,
"""Fallback blocking event loop for transports that doesn't support AIO."""
on_task_received = obj.create_task_handler()
- consumer.register_callback(on_task_received)
+ consumer.on_message = on_task_received
consumer.consume()
obj.on_ready()
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 0b0d327c3..19a31ef90 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -11,12 +11,11 @@
import logging
from kombu.async.timer import to_timestamp
-from kombu.utils.encoding import safe_repr
from celery.utils.log import get_logger
from celery.utils.timeutils import timezone
-from .job import Request
+from .job import Request, RequestV1
from .state import task_reserved
__all__ = ['default']
@@ -29,7 +28,8 @@ def default(task, app, consumer,
to_system_tz=timezone.to_system):
hostname = consumer.hostname
eventer = consumer.event_dispatcher
- Req = Request
+ ReqV2 = Request
+ ReqV1 = RequestV1
connection_errors = consumer.connection_errors
_does_info = logger.isEnabledFor(logging.INFO)
events = eventer and eventer.enabled
@@ -43,11 +43,17 @@ def default(task, app, consumer,
def task_message_handler(message, body, ack, reject, callbacks,
to_timestamp=to_timestamp):
- req = Req(body, on_ack=ack, on_reject=reject,
- app=app, hostname=hostname,
- eventer=eventer, task=task,
- connection_errors=connection_errors,
- message=message)
+ if body is None:
+ req = ReqV2(message,
+ on_ack=ack, on_reject=reject, app=app,
+ hostname=hostname, eventer=eventer, task=task,
+ connection_errors=connection_errors)
+ else:
+ req = ReqV1(body,
+ on_ack=ack, on_reject=reject, app=app,
+ hostname=hostname, eventer=eventer, task=task,
+ connection_errors=connection_errors,
+ message=message)
if req.revoked():
return
@@ -58,7 +64,7 @@ def task_message_handler(message, body, ack, reject, callbacks,
send_event(
'task-received',
uuid=req.id, name=req.name,
- args=safe_repr(req.args), kwargs=safe_repr(req.kwargs),
+ args='', kwargs='',
retries=req.request_dict.get('retries', 0),
eta=req.eta and req.eta.isoformat(),
expires=req.expires and req.expires.isoformat(),
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
index d36936c2e..f3c8359ca 100644
--- a/docs/internals/protov2.rst
+++ b/docs/internals/protov2.rst
@@ -103,6 +103,9 @@ Definition
headers = {
'lang': (string)'py'
'c_type': (string)task,
+ 'task_id': (uuid)task_id,
+ 'root_id': (uuid)root_id,
+ 'parent_id': (uuid)parent_id,
# optional
'c_meth': (string)unused,
@@ -116,8 +119,6 @@ Definition
'chord': (uuid)chord_id,
'retries': (int)retries,
'timelimit': (tuple)(soft, hard),
- 'root_id': (uuid)root_id,
- 'parent_id': (uuid)parent_id,
}
body = (args, kwargs)
diff --git a/funtests/stress/stress/templates.py b/funtests/stress/stress/templates.py
index 931269807..520481108 100644
--- a/funtests/stress/stress/templates.py
+++ b/funtests/stress/stress/templates.py
@@ -70,6 +70,7 @@ class default(object):
'interval_max': 2,
'interval_step': 0.1,
}
+ CELERY_TASK_PROTOCOL = 2
@template()
From e442df61b2ff1fe855881c1e2ff9acc970090f54 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 9 May 2014 17:42:40 +0100
Subject: [PATCH 0109/1103] Tests passing, more refactoring
---
celery/app/amqp.py | 2 +-
celery/app/trace.py | 109 +++++---
celery/events/state.py | 6 +-
celery/tests/case.py | 42 ++-
celery/tests/tasks/test_trace.py | 7 +-
celery/tests/worker/test_control.py | 27 +-
celery/tests/worker/test_loops.py | 43 ++-
celery/tests/worker/test_request.py | 400 ++++++++++-----------------
celery/tests/worker/test_strategy.py | 13 +-
celery/tests/worker/test_worker.py | 138 +++++----
celery/worker/autoscale.py | 2 +-
celery/worker/consumer.py | 35 +--
celery/worker/job.py | 96 ++++---
celery/worker/strategy.py | 2 +-
docs/internals/protov2.rst | 4 +-
15 files changed, 430 insertions(+), 496 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index b70532cef..e8e801529 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -297,7 +297,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
headers={
'lang': 'py',
'c_type': name,
- 'task_id': task_id,
+ 'id': task_id,
'eta': eta,
'expires': expires,
'callbacks': callbacks,
diff --git a/celery/app/trace.py b/celery/app/trace.py
index 03e07423e..60776b9ff 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -25,7 +25,7 @@
from billiard.einfo import ExceptionInfo
from kombu.exceptions import EncodeError
-from kombu.serialization import decode as decode_message
+from kombu.serialization import loads as loads_message, prepare_accept_content
from kombu.utils.encoding import safe_repr, safe_str
from celery import current_app, group
@@ -78,6 +78,22 @@
Task %(name)s[%(id)s] retry: %(exc)s\
"""
+log_policy_t = namedtuple(
+ 'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
+)
+
+log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
+log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
+log_policy_internal = log_policy_t(
+ LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
+)
+log_policy_expected = log_policy_t(
+ LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
+)
+log_policy_unexpected = log_policy_t(
+ LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
+)
+
send_prerun = signals.task_prerun.send
send_postrun = signals.task_postrun.send
send_success = signals.task_success.send
@@ -91,7 +107,7 @@
IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
#: set by :func:`setup_worker_optimizations`
-_tasks = None
+_localized = []
_patched = {}
trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))
@@ -104,6 +120,19 @@ def task_has_custom(task, attr):
monkey_patched=['celery.app.task'])
+def get_log_policy(task, einfo, exc):
+ if isinstance(exc, Reject):
+ return log_policy_reject
+ elif isinstance(exc, Ignore):
+ return log_policy_ignore
+ elif einfo.internal:
+ return log_policy_internal
+ else:
+ if task.throws and isinstance(exc, task.throws):
+ return log_policy_expected
+ return log_policy_unexpected
+
+
class TraceInfo(object):
__slots__ = ('state', 'retval')
@@ -172,39 +201,14 @@ def handle_failure(self, task, store_errors=True):
def _log_error(self, task, einfo):
req = task.request
eobj = einfo.exception = get_pickled_exception(einfo.exception)
- exception, traceback, exc_info, internal, sargs, skwargs = (
+ exception, traceback, exc_info, sargs, skwargs = (
safe_repr(eobj),
safe_str(einfo.traceback),
einfo.exc_info,
- einfo.internal,
safe_repr(req.args),
safe_repr(req.kwargs),
)
- if task.throws and isinstance(eobj, task.throws):
- do_send_mail, severity, exc_info, description = (
- False, logging.INFO, None, 'raised expected',
- )
- else:
- do_send_mail, severity, description = (
- True, logging.ERROR, 'raised unexpected',
- )
- format = LOG_FAILURE
-
- if internal:
- if isinstance(einfo.exception, Reject):
- format = LOG_REJECTED
- description = 'rejected'
- severity = logging.WARN
- exc_info = einfo
- elif isinstance(einfo.exception, Ignore):
- format = LOG_IGNORED
- description = 'ignored'
- severity = logging.INFO
- exc_info = None
- else:
- format = LOG_INTERNAL_ERROR
- description = 'INTERNAL ERROR'
- severity = logging.CRITICAL
+ policy = get_log_policy(task, einfo, eobj)
context = {
'hostname': req.hostname,
@@ -214,15 +218,16 @@ def _log_error(self, task, einfo):
'traceback': traceback,
'args': sargs,
'kwargs': skwargs,
- 'description': description,
- 'internal': internal,
+ 'description': policy.description,
+ 'internal': einfo.internal,
}
- logger.log(severity, format.strip(), context,
- exc_info=exc_info,
+ logger.log(policy.severity, policy.format.strip(), context,
+ exc_info=exc_info if policy.traceback else None,
extra={'data': context})
- task.send_error_email(context, einfo.exception)
+ if policy.mail:
+ task.send_error_email(context, einfo.exception)
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
@@ -444,14 +449,21 @@ def trace_task(task, uuid, args, kwargs, request={}, **opts):
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
- return task.__trace__(uuid, args, kwargs, request)[0]
+ return task.__trace__(uuid, args, kwargs, request)
except Exception as exc:
return report_internal_error(task, exc)
-def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
- return trace_task((app or current_app).tasks[name],
- uuid, args, kwargs, request, app=app, **opts)
+def _trace_task_ret(name, uuid, request, body, content_type,
+ content_encoding, loads=loads_message, app=None,
+ **extra_request):
+ app = app or current_app._get_current_object()
+ accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+ args, kwargs = loads(body, content_type, content_encoding, accept=accept)
+ request.update(args=args, kwargs=kwargs, **extra_request)
+ R, I, T, Rstr = trace_task(app.tasks[name],
+ uuid, args, kwargs, request, app=app)
+ return (1, R, T) if I else (0, Rstr, T)
trace_task_ret = _trace_task_ret
@@ -460,18 +472,23 @@ def _fast_trace_task_v1(task, uuid, args, kwargs, request={}):
# so this is the function used in the worker.
R, I, T, Rstr = _tasks[task].__trace__(uuid, args, kwargs, request)[0]
# exception instance if error, else result text
- return (R if I else Rstr), T
+ return (1, R, T) if I else (0, Rstr, T)
def _fast_trace_task(task, uuid, request, body, content_type,
- content_encoding, decode_message=decode_message,
+ content_encoding, loads=loads_message, _loc=_localized,
**extra_request):
- args, kwargs = decode_message(body, content_type, content_encoding)
+ tasks, accept = _loc
+ try:
+ args, kwargs = loads(body, content_type, content_encoding,
+ accept=accept)
+ except Exception as exc:
+ print('OH NOEEES: %r' % (exc, ))
request.update(args=args, kwargs=kwargs, **extra_request)
- R, I, T, Rstr = _tasks[task].__trace__(
+ R, I, T, Rstr = tasks[task].__trace__(
uuid, args, kwargs, request,
)
- return (R if I else Rstr), T
+ return (1, R, T) if I else (0, Rstr, T)
def report_internal_error(task, exc):
@@ -488,7 +505,6 @@ def report_internal_error(task, exc):
def setup_worker_optimizations(app):
- global _tasks
global trace_task_ret
# make sure custom Task.__call__ methods that calls super
@@ -508,7 +524,10 @@ def setup_worker_optimizations(app):
app.finalize()
# set fast shortcut to task registry
- _tasks = app._tasks
+ _localized[:] = [
+ app._tasks,
+ prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+ ]
trace_task_ret = _fast_trace_task
from celery.worker import job as job_module
diff --git a/celery/events/state.py b/celery/events/state.py
index 541f72226..2a11891f7 100644
--- a/celery/events/state.py
+++ b/celery/events/state.py
@@ -30,7 +30,7 @@
from weakref import ref
from kombu.clocks import timetuple
-from kombu.utils import cached_property, kwdict
+from kombu.utils import cached_property
from celery import states
from celery.five import class_property, items, values
@@ -54,8 +54,6 @@
%s seconds. [orig: %s recv: %s]
"""
-CAN_KWDICT = sys.version_info >= (2, 6, 5)
-
logger = get_logger(__name__)
warn = logger.warning
@@ -86,7 +84,7 @@ def heartbeat_expires(timestamp, freq=60,
def _depickle_task(cls, fields):
- return cls(**(fields if CAN_KWDICT else kwdict(fields)))
+ return cls(**fields)
def with_unique_field(attr):
diff --git a/celery/tests/case.py b/celery/tests/case.py
index 808347817..520e1f55b 100644
--- a/celery/tests/case.py
+++ b/celery/tests/case.py
@@ -48,7 +48,7 @@
from celery.utils.imports import qualname
__all__ = [
- 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY',
+ 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', 'TaskMessage',
'patch', 'call', 'sentinel', 'skip_unless_module',
'wrap_logger', 'with_environ', 'sleepdeprived',
'skip_if_environ', 'todo', 'skip', 'skip_if',
@@ -56,7 +56,7 @@
'replace_module_value', 'sys_platform', 'reset_modules',
'patch_modules', 'mock_context', 'mock_open', 'patch_many',
'assert_signal_called', 'skip_if_pypy',
- 'skip_if_jython', 'body_from_sig', 'restore_logging',
+ 'skip_if_jython', 'task_message_from_sig', 'restore_logging',
]
patch = mock.patch
call = mock.call
@@ -819,7 +819,7 @@ def _inner(*args, **kwargs):
return _inner
-def body_from_sig(app, sig, utc=True):
+def task_message_from_sig(app, sig, utc=True):
sig.freeze()
callbacks = sig.options.pop('link', None)
errbacks = sig.options.pop('link_error', None)
@@ -835,17 +835,14 @@ def body_from_sig(app, sig, utc=True):
expires = app.now() + timedelta(seconds=expires)
if expires and isinstance(expires, datetime):
expires = expires.isoformat()
- return {
- 'task': sig.task,
- 'id': sig.id,
- 'args': sig.args,
- 'kwargs': sig.kwargs,
- 'callbacks': [dict(s) for s in callbacks] if callbacks else None,
- 'errbacks': [dict(s) for s in errbacks] if errbacks else None,
- 'eta': eta,
- 'utc': utc,
- 'expires': expires,
- }
+ return TaskMessage(
+ sig.task, id=sig.id, args=sig.args,
+ kwargs=sig.kwargs,
+ callbacks=[dict(s) for s in callbacks] if callbacks else None,
+ errbacks=[dict(s) for s in errbacks] if errbacks else None,
+ eta=eta,
+ expires=expires,
+ )
@contextmanager
@@ -861,3 +858,20 @@ def restore_logging():
sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs
root.level = level
root.handlers[:] = handlers
+
+
+def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+ from celery import uuid
+ from kombu.serialization import dumps
+ id = id or uuid()
+ message = Mock(name='TaskMessage-{0}'.format(id))
+ message.headers = {
+ 'id': id,
+ 'c_type': name,
+ }
+ message.headers.update(options)
+ message.content_type, message.content_encoding, message.body = dumps(
+ (args, kwargs), serializer='json',
+ )
+ message.payload = (args, kwargs)
+ return message
diff --git a/celery/tests/tasks/test_trace.py b/celery/tests/tasks/test_trace.py
index 0a7ec3345..3d55d9041 100644
--- a/celery/tests/tasks/test_trace.py
+++ b/celery/tests/tasks/test_trace.py
@@ -14,10 +14,11 @@
from celery.tests.case import AppCase, Mock, patch
-def trace(app, task, args=(), kwargs={}, propagate=False, **opts):
+def trace(app, task, args=(), kwargs={},
+ propagate=False, eager=True, request=None, **opts):
t = build_tracer(task.name, task,
- eager=True, propagate=propagate, app=app, **opts)
- ret = t('id-1', args, kwargs, None)
+ eager=eager, propagate=propagate, app=app, **opts)
+ ret = t('id-1', args, kwargs, request)
return ret.retval, ret.info
diff --git a/celery/tests/worker/test_control.py b/celery/tests/worker/test_control.py
index bb7df0daf..fc587f0d1 100644
--- a/celery/tests/worker/test_control.py
+++ b/celery/tests/worker/test_control.py
@@ -21,7 +21,7 @@
from celery.worker.control import Panel
from celery.worker.pidbox import Pidbox, gPidbox
-from celery.tests.case import AppCase, Mock, call, patch
+from celery.tests.case import AppCase, Mock, TaskMessage, call, patch
hostname = socket.gethostname()
@@ -250,12 +250,7 @@ def test_report(self):
self.panel.handle('report')
def test_active(self):
- r = Request({
- 'task': self.mytask.name,
- 'id': 'do re mi',
- 'args': (),
- 'kwargs': {},
- }, app=self.app)
+ r = Request(TaskMessage(self.mytask.name, 'do re mi'), app=self.app)
worker_state.active_requests.add(r)
try:
self.assertTrue(self.panel.handle('dump_active'))
@@ -347,12 +342,7 @@ def test_dump_schedule(self):
consumer = Consumer(self.app)
panel = self.create_panel(consumer=consumer)
self.assertFalse(panel.handle('dump_schedule'))
- r = Request({
- 'task': self.mytask.name,
- 'id': 'CAFEBABE',
- 'args': (),
- 'kwargs': {},
- }, app=self.app)
+ r = Request(TaskMessage(self.mytask.name, 'CAFEBABE'), app=self.app)
consumer.timer.schedule.enter_at(
consumer.timer.Entry(lambda x: x, (r, )),
datetime.now() + timedelta(seconds=10))
@@ -363,19 +353,14 @@ def test_dump_schedule(self):
def test_dump_reserved(self):
consumer = Consumer(self.app)
- worker_state.reserved_requests.add(Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': (2, 2),
- 'kwargs': {},
- }, app=self.app))
+ worker_state.reserved_requests.add(
+ Request(TaskMessage(self.mytask.name, args=(2, 2)), app=self.app),
+ )
try:
panel = self.create_panel(consumer=consumer)
response = panel.handle('dump_reserved', {'safe': True})
self.assertDictContainsSubset(
{'name': self.mytask.name,
- 'args': (2, 2),
- 'kwargs': {},
'hostname': socket.gethostname()},
response[0],
)
diff --git a/celery/tests/worker/test_loops.py b/celery/tests/worker/test_loops.py
index 00c5d960f..80edd393a 100644
--- a/celery/tests/worker/test_loops.py
+++ b/celery/tests/worker/test_loops.py
@@ -11,7 +11,7 @@
from celery.worker.consumer import Consumer
from celery.worker.loops import asynloop, synloop
-from celery.tests.case import AppCase, Mock, body_from_sig
+from celery.tests.case import AppCase, Mock, task_message_from_sig
class X(object):
@@ -107,7 +107,7 @@ def get_task_callback(*args, **kwargs):
x = X(*args, **kwargs)
x.blueprint.state = CLOSE
asynloop(*x.args)
- return x, x.consumer.callbacks[0]
+ return x, x.consumer.on_message
class test_asynloop(AppCase):
@@ -132,45 +132,44 @@ def test_setup_heartbeat(self):
def task_context(self, sig, **kwargs):
x, on_task = get_task_callback(self.app, **kwargs)
- body = body_from_sig(self.app, sig)
- message = Mock()
- strategy = x.obj.strategies[sig.task] = Mock()
- return x, on_task, body, message, strategy
+ message = task_message_from_sig(self.app, sig)
+ strategy = x.obj.strategies[sig.task] = Mock(name='strategy')
+ return x, on_task, message, strategy
def test_on_task_received(self):
- _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
- on_task(body, msg)
+ _, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+ on_task(msg)
strategy.assert_called_with(
- msg, body, msg.ack_log_error, msg.reject_log_error, [],
+ msg, None, msg.ack_log_error, msg.reject_log_error, [],
)
def test_on_task_received_executes_on_task_message(self):
cbs = [Mock(), Mock(), Mock()]
- _, on_task, body, msg, strategy = self.task_context(
+ _, on_task, msg, strategy = self.task_context(
self.add.s(2, 2), on_task_message=cbs,
)
- on_task(body, msg)
+ on_task(msg)
strategy.assert_called_with(
- msg, body, msg.ack_log_error, msg.reject_log_error, cbs,
+ msg, None, msg.ack_log_error, msg.reject_log_error, cbs,
)
def test_on_task_message_missing_name(self):
- x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
- body.pop('task')
- on_task(body, msg)
- x.on_unknown_message.assert_called_with(body, msg)
+ x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
+ msg.headers.pop('c_type')
+ on_task(msg)
+ x.on_unknown_message.assert_called_with(((2, 2), {}), msg)
def test_on_task_not_registered(self):
- x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+ x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
exc = strategy.side_effect = KeyError(self.add.name)
- on_task(body, msg)
- x.on_unknown_task.assert_called_with(body, msg, exc)
+ on_task(msg)
+ x.on_unknown_task.assert_called_with(None, msg, exc)
def test_on_task_InvalidTaskError(self):
- x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2))
+ x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
exc = strategy.side_effect = InvalidTaskError()
- on_task(body, msg)
- x.on_invalid_task.assert_called_with(body, msg, exc)
+ on_task(msg)
+ x.on_invalid_task.assert_called_with(None, msg, exc)
def test_should_terminate(self):
x = X(self.app)
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index 488ea72f4..280152475 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
-import anyjson
+import numbers
import os
import signal
import socket
@@ -10,7 +10,6 @@
from datetime import datetime, timedelta
from billiard.einfo import ExceptionInfo
-from kombu.transport.base import Message
from kombu.utils.encoding import from_utf8, default_encode
from celery import states
@@ -27,12 +26,13 @@
from celery.exceptions import (
Ignore,
InvalidTaskError,
+ Reject,
Retry,
TaskRevokedError,
Terminated,
WorkerLostError,
)
-from celery.five import keys, monotonic
+from celery.five import monotonic
from celery.signals import task_revoked
from celery.utils import uuid
from celery.worker import job as module
@@ -44,8 +44,9 @@
Case,
Mock,
SkipTest,
+ TaskMessage,
assert_signal_called,
- body_from_sig,
+ task_message_from_sig,
patch,
)
@@ -85,7 +86,7 @@ def jail(app, task_id, name, args, kwargs):
task.__trace__ = None # rebuild
return trace_task(
task, task_id, args, kwargs, request=request, eager=False, app=app,
- )
+ ).retval
class test_default_encode(AppCase):
@@ -138,7 +139,7 @@ def mytask_raising(i):
raise KeyError(i)
self.mytask_raising = mytask_raising
- @patch('celery.app.trace._logger')
+ @patch('celery.app.trace.logger')
def test_process_cleanup_fails(self, _logger):
self.mytask.backend = Mock()
self.mytask.backend.process_cleanup = Mock(side_effect=KeyError())
@@ -227,9 +228,10 @@ def mytask_raising(i):
def get_request(self, sig, Request=Request, **kwargs):
return Request(
- body_from_sig(self.app, sig),
- on_ack=Mock(),
- eventer=Mock(),
+ task_message_from_sig(self.app, sig),
+ on_ack=Mock(name='on_ack'),
+ on_reject=Mock(name='on_reject'),
+ eventer=Mock(name='eventer'),
app=self.app,
connection_errors=(socket.error, ),
task=sig.type,
@@ -273,7 +275,7 @@ def test_on_failure_Termianted(self):
uuid=req.id, terminated=True, signum='9', expired=False,
)
- def test_log_error_propagates_MemoryError(self):
+ def test_on_failure_propagates_MemoryError(self):
einfo = None
try:
raise MemoryError()
@@ -282,9 +284,9 @@ def test_log_error_propagates_MemoryError(self):
self.assertIsNotNone(einfo)
req = self.get_request(self.add.s(2, 2))
with self.assertRaises(MemoryError):
- req._log_error(einfo)
+ req.on_failure(einfo)
- def test_log_error_when_Ignore(self):
+ def test_on_failure_Ignore_acknowledges(self):
einfo = None
try:
raise Ignore()
@@ -292,48 +294,55 @@ def test_log_error_when_Ignore(self):
einfo = ExceptionInfo(internal=True)
self.assertIsNotNone(einfo)
req = self.get_request(self.add.s(2, 2))
- req._log_error(einfo)
+ req.on_failure(einfo)
req.on_ack.assert_called_with(req_logger, req.connection_errors)
+ def test_on_failure_Reject_rejects(self):
+ einfo = None
+ try:
+ raise Reject()
+ except Reject:
+ einfo = ExceptionInfo(internal=True)
+ self.assertIsNotNone(einfo)
+ req = self.get_request(self.add.s(2, 2))
+ req.on_failure(einfo)
+ req.on_reject.assert_called_with(
+ req_logger, req.connection_errors, False,
+ )
+
+ def test_on_failure_Reject_rejects_with_requeue(self):
+ einfo = None
+ try:
+ raise Reject(requeue=True)
+ except Reject:
+ einfo = ExceptionInfo(internal=True)
+ self.assertIsNotNone(einfo)
+ req = self.get_request(self.add.s(2, 2))
+ req.on_failure(einfo)
+ req.on_reject.assert_called_with(
+ req_logger, req.connection_errors, True,
+ )
+
def test_tzlocal_is_cached(self):
req = self.get_request(self.add.s(2, 2))
req._tzlocal = 'foo'
self.assertEqual(req.tzlocal, 'foo')
- def test_execute_magic_kwargs(self):
- task = self.add.s(2, 2)
- task.freeze()
- req = self.get_request(task)
- self.add.accept_magic_kwargs = True
- pool = Mock()
- req.execute_using_pool(pool)
- self.assertTrue(pool.apply_async.called)
- args = pool.apply_async.call_args[1]['args']
- self.assertEqual(args[0], task.task)
- self.assertEqual(args[1], task.id)
- self.assertEqual(args[2], task.args)
- kwargs = args[3]
- self.assertEqual(kwargs.get('task_name'), task.task)
-
- def xRequest(self, body=None, **kwargs):
- body = dict({'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwargs': {'f': 'x'}}, **body or {})
- return Request(body, app=self.app, **kwargs)
+ def xRequest(self, name=None, id=None, args=None, kwargs=None,
+ on_ack=None, on_reject=None, **head):
+ args = [1] if args is None else args
+ kwargs = {'f': 'x'} if kwargs is None else kwargs
+ on_ack = on_ack or Mock(name='on_ack')
+ on_reject = on_reject or Mock(name='on_reject')
+ message = TaskMessage(
+ name or self.mytask.name, id, args=args, kwargs=kwargs, **head
+ )
+ return Request(message, app=self.app,
+ on_ack=on_ack, on_reject=on_reject)
def test_task_wrapper_repr(self):
self.assertTrue(repr(self.xRequest()))
- @patch('celery.worker.job.kwdict')
- def test_kwdict(self, kwdict):
- prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True
- try:
- self.xRequest()
- self.assertTrue(kwdict.called)
- finally:
- module.NEEDS_KWDICT = prev
-
def test_sets_store_errors(self):
self.mytask.ignore_result = True
job = self.xRequest()
@@ -350,12 +359,7 @@ def test_send_event(self):
self.assertIn('task-frobulated', job.eventer.sent)
def test_on_retry(self):
- job = Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwargs': {'f': 'x'},
- }, app=self.app)
+ job = self.get_request(self.mytask.s(1, f='x'))
job.eventer = MockEventDispatcher()
try:
raise Retry('foo', KeyError('moofoobar'))
@@ -372,12 +376,7 @@ def test_on_retry(self):
job.on_failure(einfo)
def test_compat_properties(self):
- job = Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwargs': {'f': 'x'},
- }, app=self.app)
+ job = self.xRequest()
self.assertEqual(job.task_id, job.id)
self.assertEqual(job.task_name, job.name)
job.task_id = 'ID'
@@ -388,12 +387,7 @@ def test_compat_properties(self):
def test_terminate__task_started(self):
pool = Mock()
signum = signal.SIGTERM
- job = Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwrgs': {'f': 'x'},
- }, app=self.app)
+ job = self.get_request(self.mytask.s(1, f='x'))
with assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=True, expired=False, signum=signum):
@@ -404,12 +398,7 @@ def test_terminate__task_started(self):
def test_terminate__task_reserved(self):
pool = Mock()
- job = Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwargs': {'f': 'x'},
- }, app=self.app)
+ job = self.get_request(self.mytask.s(1, f='x'))
job.time_start = None
job.terminate(pool, signal='TERM')
self.assertFalse(pool.terminate_job.called)
@@ -417,13 +406,9 @@ def test_terminate__task_reserved(self):
job.terminate(pool, signal='TERM')
def test_revoked_expires_expired(self):
- job = Request({
- 'task': self.mytask.name,
- 'id': uuid(),
- 'args': [1],
- 'kwargs': {'f': 'x'},
- 'expires': datetime.utcnow() - timedelta(days=1),
- }, app=self.app)
+ job = self.get_request(self.mytask.s(1, f='x').set(
+ expires=datetime.utcnow() - timedelta(days=1)
+ ))
with assert_signal_called(
task_revoked, sender=job.task, request=job,
terminated=False, expired=True, signum=None):
@@ -435,9 +420,9 @@ def test_revoked_expires_expired(self):
)
def test_revoked_expires_not_expired(self):
- job = self.xRequest({
- 'expires': datetime.utcnow() + timedelta(days=1),
- })
+ job = self.xRequest(
+ expires=datetime.utcnow() + timedelta(days=1),
+ )
job.revoked()
self.assertNotIn(job.id, revoked)
self.assertNotEqual(
@@ -447,47 +432,15 @@ def test_revoked_expires_not_expired(self):
def test_revoked_expires_ignore_result(self):
self.mytask.ignore_result = True
- job = self.xRequest({
- 'expires': datetime.utcnow() - timedelta(days=1),
- })
+ job = self.xRequest(
+ expires=datetime.utcnow() - timedelta(days=1),
+ )
job.revoked()
self.assertIn(job.id, revoked)
self.assertNotEqual(
self.mytask.backend.get_status(job.id), states.REVOKED,
)
- def test_send_email(self):
- app = self.app
- mail_sent = [False]
-
- def mock_mail_admins(*args, **kwargs):
- mail_sent[0] = True
-
- def get_ei():
- try:
- raise KeyError('moofoobar')
- except:
- return ExceptionInfo()
-
- app.mail_admins = mock_mail_admins
- self.mytask.send_error_emails = True
- job = self.xRequest()
- einfo = get_ei()
- job.on_failure(einfo)
- self.assertTrue(mail_sent[0])
-
- einfo = get_ei()
- mail_sent[0] = False
- self.mytask.send_error_emails = False
- job.on_failure(einfo)
- self.assertFalse(mail_sent[0])
-
- einfo = get_ei()
- mail_sent[0] = False
- self.mytask.send_error_emails = True
- job.on_failure(einfo)
- self.assertTrue(mail_sent[0])
-
def test_already_revoked(self):
job = self.xRequest()
job._already_revoked = True
@@ -510,10 +463,10 @@ def test_execute_does_not_execute_revoked(self):
def test_execute_acks_late(self):
self.mytask_raising.acks_late = True
- job = self.xRequest({
- 'task': self.mytask_raising.name,
- 'kwargs': {},
- })
+ job = self.xRequest(
+ name=self.mytask_raising.name,
+ kwargs={},
+ )
job.execute()
self.assertTrue(job.acknowledged)
job.execute()
@@ -555,10 +508,10 @@ def test_on_accepted_terminates(self):
def test_on_success_acks_early(self):
job = self.xRequest()
job.time_start = 1
- job.on_success(42)
+ job.on_success((0, 42, 0.001))
prev, module._does_info = module._does_info, False
try:
- job.on_success(42)
+ job.on_success((0, 42, 0.001))
self.assertFalse(job.acknowledged)
finally:
module._does_info = prev
@@ -570,7 +523,7 @@ def test_on_success_BaseException(self):
try:
raise SystemExit()
except SystemExit:
- job.on_success(ExceptionInfo())
+ job.on_success((1, ExceptionInfo(), 0.01))
else:
assert False
@@ -579,7 +532,7 @@ def test_on_success_eventer(self):
job.time_start = 1
job.eventer = Mock()
job.eventer.send = Mock()
- job.on_success(42)
+ job.on_success((0, 42, 0.001))
self.assertTrue(job.eventer.send.called)
def test_on_success_when_failure(self):
@@ -589,14 +542,14 @@ def test_on_success_when_failure(self):
try:
raise KeyError('foo')
except Exception:
- job.on_success(ExceptionInfo())
+ job.on_success((1, ExceptionInfo(), 0.001))
self.assertTrue(job.on_failure.called)
def test_on_success_acks_late(self):
job = self.xRequest()
job.time_start = 1
self.mytask.acks_late = True
- job.on_success(42)
+ job.on_success((0, 42, 0.001))
self.assertTrue(job.acknowledged)
def test_on_failure_WorkerLostError(self):
@@ -634,9 +587,10 @@ def test_on_failure_acks_late(self):
self.assertTrue(job.acknowledged)
def test_from_message_invalid_kwargs(self):
- body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo')
+ m = TaskMessage(self.mytask.name, args=(), kwargs='foo')
+ req = Request(m, app=self.app)
with self.assertRaises(InvalidTaskError):
- Request(body, message=None, app=self.app)
+ raise req.execute().exception
@patch('celery.worker.job.error')
@patch('celery.worker.job.warn')
@@ -662,37 +616,60 @@ def test_fast_trace_task(self):
from celery.app import trace
setup_worker_optimizations(self.app)
self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
+ tid = uuid()
+ message = TaskMessage(self.mytask.name, tid, args=[4])
try:
self.mytask.__trace__ = build_tracer(
self.mytask.name, self.mytask, self.app.loader, 'test',
app=self.app,
)
- res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {})
- self.assertEqual(res, 4 ** 4)
+ failed, res, runtime = trace.trace_task_ret(
+ self.mytask.name, tid, message.headers, message.body,
+ message.content_type, message.content_encoding)
+ self.assertFalse(failed)
+ self.assertEqual(res, repr(4 ** 4))
+ self.assertTrue(runtime)
+ self.assertIsInstance(runtime, numbers.Real)
finally:
reset_worker_optimizations()
self.assertIs(trace.trace_task_ret, trace._trace_task_ret)
delattr(self.mytask, '__trace__')
- res = trace.trace_task_ret(
- self.mytask.name, uuid(), [4], {}, app=self.app,
+ failed, res, runtime = trace.trace_task_ret(
+ self.mytask.name, tid, message.headers, message.body,
+ message.content_type, message.content_encoding, app=self.app,
)
- self.assertEqual(res, 4 ** 4)
+ self.assertFalse(failed)
+ self.assertEqual(res, repr(4 ** 4))
+ self.assertTrue(runtime)
+ self.assertIsInstance(runtime, numbers.Real)
def test_trace_task_ret(self):
self.mytask.__trace__ = build_tracer(
self.mytask.name, self.mytask, self.app.loader, 'test',
app=self.app,
)
- res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
- self.assertEqual(res, 4 ** 4)
+ tid = uuid()
+ message = TaskMessage(self.mytask.name, tid, args=[4])
+ _, R, _ = _trace_task_ret(
+ self.mytask.name, tid, message.headers,
+ message.body, message.content_type,
+ message.content_encoding, app=self.app,
+ )
+ self.assertEqual(R, repr(4 ** 4))
def test_trace_task_ret__no_trace(self):
try:
delattr(self.mytask, '__trace__')
except AttributeError:
pass
- res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app)
- self.assertEqual(res, 4 ** 4)
+ tid = uuid()
+ message = TaskMessage(self.mytask.name, tid, args=[4])
+ _, R, _ = _trace_task_ret(
+ self.mytask.name, tid, message.headers,
+ message.body, message.content_type,
+ message.content_encoding, app=self.app,
+ )
+ self.assertEqual(R, repr(4 ** 4))
def test_trace_catches_exception(self):
@@ -705,7 +682,7 @@ def raising():
with self.assertWarnsRegex(RuntimeWarning,
r'Exception raised outside'):
- res = trace_task(raising, uuid(), [], {}, app=self.app)
+ res = trace_task(raising, uuid(), [], {}, app=self.app)[0]
self.assertIsInstance(res, ExceptionInfo)
def test_worker_task_trace_handle_retry(self):
@@ -749,71 +726,39 @@ def test_worker_task_trace_handle_failure(self):
finally:
self.mytask.pop_request()
- def test_task_wrapper_mail_attrs(self):
- job = self.xRequest({'args': [], 'kwargs': {}})
- x = job.success_msg % {
- 'name': job.name,
- 'id': job.id,
- 'return_value': 10,
- 'runtime': 0.3641,
- }
- self.assertTrue(x)
- x = job.error_msg % {
- 'name': job.name,
- 'id': job.id,
- 'exc': 'FOOBARBAZ',
- 'description': 'raised unexpected',
- 'traceback': 'foobarbaz',
- }
- self.assertTrue(x)
-
def test_from_message(self):
us = 'æØåveéðƒeæ'
- body = {'task': self.mytask.name, 'id': uuid(),
- 'args': [2], 'kwargs': {us: 'bar'}}
- m = Message(None, body=anyjson.dumps(body), backend='foo',
- content_type='application/json',
- content_encoding='utf-8')
- job = Request(m.decode(), message=m, app=self.app)
+ tid = uuid()
+ m = TaskMessage(self.mytask.name, tid, args=[2], kwargs={us: 'bar'})
+ job = Request(m, app=self.app)
self.assertIsInstance(job, Request)
- self.assertEqual(job.name, body['task'])
- self.assertEqual(job.id, body['id'])
- self.assertEqual(job.args, body['args'])
- us = from_utf8(us)
- if sys.version_info < (2, 6):
- self.assertEqual(next(keys(job.kwargs)), us)
- self.assertIsInstance(next(keys(job.kwargs)), str)
+ self.assertEqual(job.name, self.mytask.name)
+ self.assertEqual(job.id, tid)
+ self.assertIs(job.message, m)
def test_from_message_empty_args(self):
- body = {'task': self.mytask.name, 'id': uuid()}
- m = Message(None, body=anyjson.dumps(body), backend='foo',
- content_type='application/json',
- content_encoding='utf-8')
- job = Request(m.decode(), message=m, app=self.app)
+ tid = uuid()
+ m = TaskMessage(self.mytask.name, tid, args=[], kwargs={})
+ job = Request(m, app=self.app)
self.assertIsInstance(job, Request)
- self.assertEqual(job.args, [])
- self.assertEqual(job.kwargs, {})
def test_from_message_missing_required_fields(self):
- body = {}
- m = Message(None, body=anyjson.dumps(body), backend='foo',
- content_type='application/json',
- content_encoding='utf-8')
+ m = TaskMessage(self.mytask.name)
+ m.headers.clear()
with self.assertRaises(KeyError):
- Request(m.decode(), message=m, app=self.app)
+ Request(m, app=self.app)
def test_from_message_nonexistant_task(self):
- body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(),
- 'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}}
- m = Message(None, body=anyjson.dumps(body), backend='foo',
- content_type='application/json',
- content_encoding='utf-8')
+ m = TaskMessage(
+ 'cu.mytask.doesnotexist',
+ args=[2], kwargs={'æØåveéðƒeæ': 'bar'},
+ )
with self.assertRaises(KeyError):
- Request(m.decode(), message=m, app=self.app)
+ Request(m, app=self.app)
def test_execute(self):
tid = uuid()
- job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}})
+ job = self.xRequest(id=tid, args=[4], kwargs={})
self.assertEqual(job.execute(), 256)
meta = self.mytask.backend.get_task_meta(tid)
self.assertEqual(meta['status'], states.SUCCESS)
@@ -826,38 +771,17 @@ def mytask_no_kwargs(i):
return i ** i
tid = uuid()
- job = self.xRequest({
- 'task': mytask_no_kwargs.name,
- 'id': tid,
- 'args': [4],
- 'kwargs': {},
- })
+ job = self.xRequest(
+ name=mytask_no_kwargs.name,
+ id=tid,
+ args=[4],
+ kwargs={},
+ )
self.assertEqual(job.execute(), 256)
meta = mytask_no_kwargs.backend.get_task_meta(tid)
self.assertEqual(meta['result'], 256)
self.assertEqual(meta['status'], states.SUCCESS)
- def test_execute_success_some_kwargs(self):
- scratch = {'task_id': None}
-
- @self.app.task(shared=False, accept_magic_kwargs=True)
- def mytask_some_kwargs(i, task_id):
- scratch['task_id'] = task_id
- return i ** i
-
- tid = uuid()
- job = self.xRequest({
- 'task': mytask_some_kwargs.name,
- 'id': tid,
- 'args': [4],
- 'kwargs': {},
- })
- self.assertEqual(job.execute(), 256)
- meta = mytask_some_kwargs.backend.get_task_meta(tid)
- self.assertEqual(scratch.get('task_id'), tid)
- self.assertEqual(meta['result'], 256)
- self.assertEqual(meta['status'], states.SUCCESS)
-
def test_execute_ack(self):
scratch = {'ACK': False}
@@ -865,7 +789,7 @@ def on_ack(*args, **kwargs):
scratch['ACK'] = True
tid = uuid()
- job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack)
+ job = self.xRequest(id=tid, args=[4], on_ack=on_ack)
self.assertEqual(job.execute(), 256)
meta = self.mytask.backend.get_task_meta(tid)
self.assertTrue(scratch['ACK'])
@@ -874,12 +798,13 @@ def on_ack(*args, **kwargs):
def test_execute_fail(self):
tid = uuid()
- job = self.xRequest({
- 'task': self.mytask_raising.name,
- 'id': tid,
- 'args': [4],
- 'kwargs': {},
- })
+ job = self.xRequest(
+ name=self.mytask_raising.name,
+ id=tid,
+ args=[4],
+ kwargs={},
+ )
+ print(job.execute())
self.assertIsInstance(job.execute(), ExceptionInfo)
meta = self.mytask_raising.backend.get_task_meta(tid)
self.assertEqual(meta['status'], states.FAILURE)
@@ -887,7 +812,7 @@ def test_execute_fail(self):
def test_execute_using_pool(self):
tid = uuid()
- job = self.xRequest({'id': tid, 'args': [4]})
+ job = self.xRequest(id=tid, args=[4])
class MockPool(BasePool):
target = None
@@ -908,48 +833,21 @@ def apply_async(self, target, args=None, kwargs=None,
self.assertTrue(p.target)
self.assertEqual(p.args[0], self.mytask.name)
self.assertEqual(p.args[1], tid)
- self.assertEqual(p.args[2], [4])
- self.assertIn('f', p.args[3])
- self.assertIn([4], p.args)
+ self.assertEqual(p.args[3], job.message.body)
job.task.accept_magic_kwargs = False
job.execute_using_pool(p)
- def test_default_kwargs(self):
- self.maxDiff = 3000
- tid = uuid()
- job = self.xRequest({'id': tid, 'args': [4]})
- self.assertDictEqual(
- job.extend_with_default_kwargs(), {
- 'f': 'x',
- 'logfile': None,
- 'loglevel': None,
- 'task_id': job.id,
- 'task_retries': 0,
- 'task_is_eager': False,
- 'delivery_info': {
- 'exchange': None,
- 'routing_key': None,
- 'priority': 0,
- 'redelivered': False,
- },
- 'task_name': job.name})
-
- @patch('celery.worker.job.logger')
- def _test_on_failure(self, exception, logger):
- app = self.app
+ def _test_on_failure(self, exception):
tid = uuid()
- job = self.xRequest({'id': tid, 'args': [4]})
+ job = self.xRequest(id=tid, args=[4])
+ job.send_event = Mock(name='send_event')
try:
raise exception
except Exception:
exc_info = ExceptionInfo()
- app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True
job.on_failure(exc_info)
- self.assertTrue(logger.log.called)
- context = logger.log.call_args[0][2]
- self.assertEqual(self.mytask.name, context['name'])
- self.assertIn(tid, context['id'])
+ self.assertTrue(job.send_event.called)
def test_on_failure(self):
self._test_on_failure(Exception('Inside unit tests'))
diff --git a/celery/tests/worker/test_strategy.py b/celery/tests/worker/test_strategy.py
index 7edf78bff..87ae65006 100644
--- a/celery/tests/worker/test_strategy.py
+++ b/celery/tests/worker/test_strategy.py
@@ -8,7 +8,7 @@
from celery.worker import state
from celery.utils.timeutils import rate
-from celery.tests.case import AppCase, Mock, patch, body_from_sig
+from celery.tests.case import AppCase, Mock, patch, task_message_from_sig
class test_default_strategy(AppCase):
@@ -22,17 +22,16 @@ def add(x, y):
class Context(object):
- def __init__(self, sig, s, reserved, consumer, message, body):
+ def __init__(self, sig, s, reserved, consumer, message):
self.sig = sig
self.s = s
self.reserved = reserved
self.consumer = consumer
self.message = message
- self.body = body
def __call__(self, **kwargs):
return self.s(
- self.message, self.body,
+ self.message, None,
self.message.ack, self.message.reject, [], **kwargs
)
@@ -76,10 +75,8 @@ def _context(self, sig,
s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved)
self.assertTrue(s)
- message = Mock()
- body = body_from_sig(self.app, sig, utc=utc)
-
- yield self.Context(sig, s, reserved, consumer, message, body)
+ message = task_message_from_sig(self.app, sig, utc=utc)
+ yield self.Context(sig, s, reserved, consumer, message)
def test_when_logging_disabled(self):
with patch('celery.worker.strategy.logger') as logger:
diff --git a/celery/tests/worker/test_worker.py b/celery/tests/worker/test_worker.py
index b700a6ca6..e939a4e8b 100644
--- a/celery/tests/worker/test_worker.py
+++ b/celery/tests/worker/test_worker.py
@@ -17,7 +17,7 @@
from celery.concurrency.base import BasePool
from celery.datastructures import AttributeDict
from celery.exceptions import (
- WorkerShutdown, WorkerTerminate, TaskRevokedError,
+ WorkerShutdown, WorkerTerminate, TaskRevokedError, InvalidTaskError,
)
from celery.five import Empty, range, Queue as FastQueue
from celery.utils import uuid
@@ -29,7 +29,9 @@
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
-from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging
+from celery.tests.case import (
+ AppCase, Mock, SkipTest, TaskMessage, patch, restore_logging,
+)
def MockStep(step=None):
@@ -123,6 +125,13 @@ def create_message(channel, **data):
return m
+def create_task_message(channel, *args, **kwargs):
+ m = TaskMessage(*args, **kwargs)
+ m.channel = channel
+ m.delivery_info = {'consumer_tag': 'mock'}
+ return m
+
+
class test_Consumer(AppCase):
def setup(self):
@@ -207,13 +216,13 @@ def test_receive_message_unknown(self, warn):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.steps.pop()
- backend = Mock()
- m = create_message(backend, unknown={'baz': '!!!'})
+ channel = Mock()
+ m = create_message(channel, unknown={'baz': '!!!'})
l.event_dispatcher = mock_event_dispatcher()
l.node = MockNode()
callback = self._get_on_message(l)
- callback(m.decode(), m)
+ callback(m)
self.assertTrue(warn.call_count)
@patch('celery.worker.strategy.to_timestamp')
@@ -222,17 +231,18 @@ def test_receive_message_eta_OverflowError(self, to_timestamp):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.steps.pop()
- m = create_message(Mock(), task=self.foo_task.name,
- args=('2, 2'),
- kwargs={},
- eta=datetime.now().isoformat())
+ m = create_task_message(
+ Mock(), self.foo_task.name,
+ args=('2, 2'), kwargs={},
+ eta=datetime.now().isoformat(),
+ )
l.event_dispatcher = mock_event_dispatcher()
l.node = MockNode()
l.update_strategies()
l.qos = Mock()
callback = self._get_on_message(l)
- callback(m.decode(), m)
+ callback(m)
self.assertTrue(m.acknowledged)
@patch('celery.worker.consumer.error')
@@ -241,13 +251,17 @@ def test_receive_message_InvalidTaskError(self, error):
l.blueprint.state = RUN
l.event_dispatcher = mock_event_dispatcher()
l.steps.pop()
- m = create_message(Mock(), task=self.foo_task.name,
- args=(1, 2), kwargs='foobarbaz', id=1)
+ m = create_task_message(
+ Mock(), self.foo_task.name,
+ args=(1, 2), kwargs='foobarbaz', id=1)
l.update_strategies()
l.event_dispatcher = mock_event_dispatcher()
+ strat = l.strategies[self.foo_task.name] = Mock(name='strategy')
+ strat.side_effect = InvalidTaskError()
callback = self._get_on_message(l)
- callback(m.decode(), m)
+ callback(m)
+ self.assertTrue(error.called)
self.assertIn('Received invalid task message', error.call_args[0][0])
@patch('celery.worker.consumer.crit')
@@ -274,18 +288,20 @@ def _get_on_message(self, l):
with self.assertRaises(WorkerShutdown):
l.loop(*l.loop_args())
- self.assertTrue(l.task_consumer.register_callback.called)
- return l.task_consumer.register_callback.call_args[0][0]
+ self.assertTrue(l.task_consumer.on_message)
+ return l.task_consumer.on_message
def test_receieve_message(self):
l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.event_dispatcher = mock_event_dispatcher()
- m = create_message(Mock(), task=self.foo_task.name,
- args=[2, 4, 8], kwargs={})
+ m = create_task_message(
+ Mock(), self.foo_task.name,
+ args=[2, 4, 8], kwargs={},
+ )
l.update_strategies()
callback = self._get_on_message(l)
- callback(m.decode(), m)
+ callback(m)
in_bucket = self.buffer.get_nowait()
self.assertIsInstance(in_bucket, Request)
@@ -419,8 +435,8 @@ def test_receieve_message_eta_isoformat(self):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.steps.pop()
- m = create_message(
- Mock(), task=self.foo_task.name,
+ m = create_task_message(
+ Mock(), self.foo_task.name,
eta=(datetime.now() + timedelta(days=1)).isoformat(),
args=[2, 4, 8], kwargs={},
)
@@ -432,7 +448,7 @@ def test_receieve_message_eta_isoformat(self):
l.enabled = False
l.update_strategies()
callback = self._get_on_message(l)
- callback(m.decode(), m)
+ callback(m)
l.timer.stop()
l.timer.join(1)
@@ -469,27 +485,31 @@ def test_revoke(self):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.steps.pop()
- backend = Mock()
+ channel = Mock()
id = uuid()
- t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8],
- kwargs={}, id=id)
+ t = create_task_message(
+ channel, self.foo_task.name,
+ args=[2, 4, 8], kwargs={}, id=id,
+ )
from celery.worker.state import revoked
revoked.add(id)
callback = self._get_on_message(l)
- callback(t.decode(), t)
+ callback(t)
self.assertTrue(self.buffer.empty())
def test_receieve_message_not_registered(self):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
l.steps.pop()
- backend = Mock()
- m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={})
+ channel = Mock(name='channel')
+ m = create_task_message(
+ channel, 'x.X.31x', args=[2, 4, 8], kwargs={},
+ )
l.event_dispatcher = mock_event_dispatcher()
callback = self._get_on_message(l)
- self.assertFalse(callback(m.decode(), m))
+ self.assertFalse(callback(m))
with self.assertRaises(Empty):
self.buffer.get_nowait()
self.assertTrue(self.timer.empty())
@@ -499,21 +519,25 @@ def test_receieve_message_not_registered(self):
def test_receieve_message_ack_raises(self, logger, warn):
l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
l.blueprint.state = RUN
- backend = Mock()
- m = create_message(backend, args=[2, 4, 8], kwargs={})
+ channel = Mock()
+ m = create_task_message(
+ channel, self.foo_task.name,
+ args=[2, 4, 8], kwargs={},
+ )
+ m.headers = None
l.event_dispatcher = mock_event_dispatcher()
+ l.update_strategies()
l.connection_errors = (socket.error, )
m.reject = Mock()
m.reject.side_effect = socket.error('foo')
callback = self._get_on_message(l)
- self.assertFalse(callback(m.decode(), m))
+ self.assertFalse(callback(m))
self.assertTrue(warn.call_count)
with self.assertRaises(Empty):
self.buffer.get_nowait()
self.assertTrue(self.timer.empty())
- m.reject.assert_called_with(requeue=False)
- self.assertTrue(logger.critical.call_count)
+ m.reject_log_error.assert_called_with(logger, l.connection_errors)
def test_receive_message_eta(self):
import sys
@@ -529,10 +553,10 @@ def pp(*args, **kwargs):
pp('-CREATE MYKOMBUCONSUMER')
l.steps.pop()
l.event_dispatcher = mock_event_dispatcher()
- backend = Mock()
+ channel = Mock(name='channel')
pp('+ CREATE MESSAGE')
- m = create_message(
- backend, task=self.foo_task.name,
+ m = create_task_message(
+ channel, self.foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() + timedelta(days=1)).isoformat(),
)
@@ -556,7 +580,7 @@ def pp(*args, **kwargs):
callback = self._get_on_message(l)
pp('- GET ON MESSAGE')
pp('+ CALLBACK')
- callback(m.decode(), m)
+ callback(m)
pp('- CALLBACK')
finally:
pp('+ STOP TIMER')
@@ -925,10 +949,12 @@ def test_on_timer_tick(self):
def test_process_task(self):
worker = self.worker
worker.pool = Mock()
- backend = Mock()
- m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
- kwargs={})
- task = Request(m.decode(), message=m, app=self.app)
+ channel = Mock()
+ m = create_task_message(
+ channel, self.foo_task.name,
+ args=[4, 8, 10], kwargs={},
+ )
+ task = Request(m, app=self.app)
worker._process_task(task)
self.assertEqual(worker.pool.apply_async.call_count, 1)
worker.pool.stop()
@@ -937,10 +963,12 @@ def test_process_task_raise_base(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C')
- backend = Mock()
- m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
- kwargs={})
- task = Request(m.decode(), message=m, app=self.app)
+ channel = Mock()
+ m = create_task_message(
+ channel, self.foo_task.name,
+ args=[4, 8, 10], kwargs={},
+ )
+ task = Request(m, app=self.app)
worker.steps = []
worker.blueprint.state = RUN
with self.assertRaises(KeyboardInterrupt):
@@ -950,10 +978,12 @@ def test_process_task_raise_WorkerTerminate(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = WorkerTerminate()
- backend = Mock()
- m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
- kwargs={})
- task = Request(m.decode(), message=m, app=self.app)
+ channel = Mock()
+ m = create_task_message(
+ channel, self.foo_task.name,
+ args=[4, 8, 10], kwargs={},
+ )
+ task = Request(m, app=self.app)
worker.steps = []
worker.blueprint.state = RUN
with self.assertRaises(SystemExit):
@@ -963,10 +993,12 @@ def test_process_task_raise_regular(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyError('some exception')
- backend = Mock()
- m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10],
- kwargs={})
- task = Request(m.decode(), message=m, app=self.app)
+ channel = Mock()
+ m = create_task_message(
+ channel, self.foo_task.name,
+ args=[4, 8, 10], kwargs={},
+ )
+ task = Request(m, app=self.app)
worker._process_task(task)
worker.pool.stop()
diff --git a/celery/worker/autoscale.py b/celery/worker/autoscale.py
index 14afc2e95..c8038b36d 100644
--- a/celery/worker/autoscale.py
+++ b/celery/worker/autoscale.py
@@ -81,7 +81,7 @@ def body(self):
self.maybe_scale()
sleep(1.0)
- def _maybe_scale(self):
+ def _maybe_scale(self, req=None):
procs = self.processes
cur = min(self.qty, self.max_concurrency)
if cur > procs:
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 33facabd1..3bb430c43 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -447,37 +447,30 @@ def create_task_handler(self):
on_invalid_task = self.on_invalid_task
callbacks = self.on_task_message
- def on_v1_task_received(body, message):
- try:
- name = body['task']
- except (KeyError, TypeError):
- return on_unknown_message(body, message)
-
- try:
- strategies[name](message, body,
- message.ack_log_error,
- message.reject_log_error,
- callbacks)
- except KeyError as exc:
- on_unknown_task(body, message, exc)
- except InvalidTaskError as exc:
- on_invalid_task(body, message, exc)
-
def on_task_received(message):
- headers = message.headers
+
+ # payload will only be set for v1 protocol, since v2
+ # will defer deserializing the message body to the pool.
+ payload = None
try:
- type_ = headers['c_type']
+ type_ = message.headers['c_type'] # protocol v2
+ except TypeError:
+ return on_unknown_message(None, message)
except KeyError:
- return on_v1_task_received(message.payload, message)
+ payload = message.payload
+ try:
+ type_ = payload['task'] # protocol v1
+ except (TypeError, KeyError):
+ return on_unknown_message(payload, message)
try:
strategies[type_](
message, None,
message.ack_log_error, message.reject_log_error, callbacks,
)
except KeyError as exc:
- on_unknown_task(None, message, exc)
+ on_unknown_task(payload, message, exc)
except InvalidTaskError as exc:
- on_invalid_task(None, message, exc)
+ on_invalid_task(payload, message, exc)
return on_task_received
diff --git a/celery/worker/job.py b/celery/worker/job.py
index 74278cc1e..0fd57f95a 100644
--- a/celery/worker/job.py
+++ b/celery/worker/job.py
@@ -13,7 +13,6 @@
import socket
import sys
-from billiard.einfo import ExceptionInfo
from datetime import datetime
from weakref import ref
@@ -83,7 +82,7 @@ def __optimize__():
class RequestV1(object):
if not IS_PYPY:
__slots__ = (
- 'app', 'name', 'id', 'root_id', 'parent_id',
+ 'app', 'message', 'name', 'id', 'root_id', 'parent_id',
'on_ack', 'hostname', 'eventer', 'connection_errors', 'task',
'eta', 'expires', 'request_dict', 'acknowledged', 'on_reject',
'utc', 'time_start', 'worker_pid', '_already_revoked',
@@ -94,9 +93,10 @@ class RequestV1(object):
class Request(object):
"""A request for task execution."""
+ utc = True
if not IS_PYPY: # pragma: no cover
__slots__ = (
- 'app', 'name', 'id', 'on_ack', 'payload',
+ 'app', 'name', 'id', 'on_ack', 'body',
'hostname', 'eventer', 'connection_errors', 'task', 'eta',
'expires', 'request_dict', 'acknowledged', 'on_reject',
'utc', 'time_start', 'worker_pid', 'timeouts',
@@ -111,9 +111,10 @@ def __init__(self, message, on_ack=noop,
task=None, on_reject=noop, **opts):
headers = message.headers
self.app = app
+ self.message = message
name = self.name = headers['c_type']
- self.id = headers['task_id']
- self.payload = message.body
+ self.id = headers['id']
+ self.body = message.body
self.content_type = message.content_type
self.content_encoding = message.content_encoding
eta = headers.get('eta')
@@ -185,14 +186,14 @@ def execute_using_pool(self, pool, **kwargs):
if self.revoked():
raise TaskRevokedError(task_id)
- payload = self.payload
+ body = self.body
timeout, soft_timeout = self.timeouts
timeout = timeout or task.time_limit
soft_timeout = soft_timeout or task.soft_time_limit
result = pool.apply_async(
trace_task_ret,
args=(self.name, task_id, self.request_dict,
- bytes(payload) if isinstance(payload, buffer) else payload,
+ bytes(body) if isinstance(body, buffer) else body,
self.content_type, self.content_encoding),
kwargs={'hostname': self.hostname, 'is_eager': False},
accept_callback=self.on_accepted,
@@ -221,14 +222,14 @@ def execute(self, loglevel=None, logfile=None):
if not self.task.acks_late:
self.acknowledge()
- kwargs = self.kwargs
request = self.request_dict
+ args, kwargs = self.message.payload
request.update({'loglevel': loglevel, 'logfile': logfile,
'hostname': self.hostname, 'is_eager': False,
- 'delivery_info': self.delivery_info})
- retval = trace_task(self.task, self.id, self.args, kwargs, request,
+ 'args': args, 'kwargs': kwargs})
+ retval = trace_task(self.task, self.id, args, kwargs, request,
hostname=self.hostname, loader=self.app.loader,
- app=self.app)
+ app=self.app)[0]
self.acknowledge()
return retval
@@ -313,22 +314,21 @@ def on_timeout(self, soft, timeout):
if self.task.acks_late:
self.acknowledge()
- def on_success(self, ret_value, **kwargs):
+ def on_success(self, failed__retval__runtime, **kwargs):
"""Handler called if the task was successfully processed."""
- if isinstance(ret_value, ExceptionInfo):
- if isinstance(ret_value.exception, (
- SystemExit, KeyboardInterrupt)):
- raise ret_value.exception
- return self.on_failure(ret_value)
+ failed, retval, runtime = failed__retval__runtime
+ if failed:
+ if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)):
+ raise retval.exception
+ return self.on_failure(retval)
task_ready(self)
if self.task.acks_late:
self.acknowledge()
if self.eventer and self.eventer.enabled:
- result, runtime = ret_value
self.send_event(
- 'task-succeeded', result=ret_value, runtime=runtime,
+ 'task-succeeded', result=retval, runtime=runtime,
)
def on_retry(self, exc_info):
@@ -340,38 +340,36 @@ def on_retry(self, exc_info):
exception=safe_repr(exc_info.exception.exc),
traceback=safe_str(exc_info.traceback))
- def on_failure(self, exc_info):
+ def on_failure(self, exc_info, send_failed_event=True):
"""Handler called if the task raised an exception."""
task_ready(self)
- send_failed_event = True
-
- if exc_info.internal:
- if isinstance(exc_info.exception, MemoryError):
- raise MemoryError('Process got: %s' % (exc_info.exception, ))
- elif isinstance(exc_info.exception, Reject):
- self.reject(requeue=exc_info.exception.requeue)
- elif isinstance(exc_info.exception, Ignore):
- self.acknowledge()
- else:
- exc = exc_info.exception
-
- if isinstance(exc, Retry):
- return self.on_retry(exc_info)
-
- # These are special cases where the process would not have had
- # time to write the result.
- if self.store_errors:
- if isinstance(exc, WorkerLostError):
- self.task.backend.mark_as_failure(
- self.id, exc, request=self,
- )
- elif isinstance(exc, Terminated):
- self._announce_revoked(
- 'terminated', True, string(exc), False)
- send_failed_event = False # already sent revoked event
- # (acks_late) acknowledge after result stored.
- if self.task.acks_late:
- self.acknowledge()
+
+ if isinstance(exc_info.exception, MemoryError):
+ raise MemoryError('Process got: %s' % (exc_info.exception, ))
+ elif isinstance(exc_info.exception, Reject):
+ return self.reject(requeue=exc_info.exception.requeue)
+ elif isinstance(exc_info.exception, Ignore):
+ return self.acknowledge()
+
+ exc = exc_info.exception
+
+ if isinstance(exc, Retry):
+ return self.on_retry(exc_info)
+
+ # These are special cases where the process would not have had
+ # time to write the result.
+ if self.store_errors:
+ if isinstance(exc, WorkerLostError):
+ self.task.backend.mark_as_failure(
+ self.id, exc, request=self,
+ )
+ elif isinstance(exc, Terminated):
+ self._announce_revoked(
+ 'terminated', True, string(exc), False)
+ send_failed_event = False # already sent revoked event
+ # (acks_late) acknowledge after result stored.
+ if self.task.acks_late:
+ self.acknowledge()
if send_failed_event:
self.send_event(
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 19a31ef90..4f12741ee 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -89,7 +89,7 @@ def task_message_handler(message, body, ack, reject, callbacks,
return limit_task(req, bucket, 1)
task_reserved(req)
if callbacks:
- [callback() for callback in callbacks]
+ [callback(req) for callback in callbacks]
handle(req)
return task_message_handler
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
index f3c8359ca..8bbdcc4ee 100644
--- a/docs/internals/protov2.rst
+++ b/docs/internals/protov2.rst
@@ -102,8 +102,8 @@ Definition
}
headers = {
'lang': (string)'py'
- 'c_type': (string)task,
- 'task_id': (uuid)task_id,
+ 'task': (string)task,
+ 'id': (uuid)task_id,
'root_id': (uuid)root_id,
'parent_id': (uuid)parent_id,
From dbb074858e3c312c1833a53490a25c313720e471 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 9 May 2014 17:49:56 +0100
Subject: [PATCH 0110/1103] Renames celery.worker.job -> celery.worker.request
---
celery/app/trace.py | 10 +++++-----
celery/contrib/batches.py | 2 +-
celery/tests/worker/test_control.py | 2 +-
celery/tests/worker/test_request.py | 10 +++++-----
celery/tests/worker/test_worker.py | 2 +-
celery/worker/control.py | 2 +-
celery/worker/{job.py => request.py} | 4 ++--
celery/worker/state.py | 4 ++--
celery/worker/strategy.py | 2 +-
docs/internals/app-overview.rst | 2 +-
docs/internals/worker.rst | 2 +-
...celery.worker.job.rst => celery.worker.request.rst} | 6 +++---
docs/reference/index.rst | 2 +-
docs/userguide/extending.rst | 2 +-
docs/userguide/signals.rst | 2 +-
docs/whatsnew-3.1.rst | 7 ++++---
16 files changed, 31 insertions(+), 30 deletions(-)
rename celery/worker/{job.py => request.py} (99%)
rename docs/reference/{celery.worker.job.rst => celery.worker.request.rst} (57%)
diff --git a/celery/app/trace.py b/celery/app/trace.py
index 60776b9ff..c0523c3f2 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -530,9 +530,9 @@ def setup_worker_optimizations(app):
]
trace_task_ret = _fast_trace_task
- from celery.worker import job as job_module
- job_module.trace_task_ret = _fast_trace_task
- job_module.__optimize__()
+ from celery.worker import request as request_module
+ request_module.trace_task_ret = _fast_trace_task
+ request_module.__optimize__()
def reset_worker_optimizations():
@@ -546,8 +546,8 @@ def reset_worker_optimizations():
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
- from celery.worker import job as job_module
- job_module.trace_task_ret = _trace_task_ret
+ from celery.worker import request as request_module
+ request_module.trace_task_ret = _trace_task_ret
def _install_stack_protection():
diff --git a/celery/contrib/batches.py b/celery/contrib/batches.py
index 8cabc6f61..5bfa3a902 100644
--- a/celery/contrib/batches.py
+++ b/celery/contrib/batches.py
@@ -88,7 +88,7 @@ def wot_api_real(urls):
from celery.task import Task
from celery.five import Empty, Queue
from celery.utils.log import get_logger
-from celery.worker.job import Request
+from celery.worker.request import Request
from celery.utils import noop
__all__ = ['Batches']
diff --git a/celery/tests/worker/test_control.py b/celery/tests/worker/test_control.py
index fc587f0d1..829bd9c9d 100644
--- a/celery/tests/worker/test_control.py
+++ b/celery/tests/worker/test_control.py
@@ -16,7 +16,7 @@
from celery.worker import consumer
from celery.worker import control
from celery.worker import state as worker_state
-from celery.worker.job import Request
+from celery.worker.request import Request
from celery.worker.state import revoked
from celery.worker.control import Panel
from celery.worker.pidbox import Pidbox, gPidbox
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index 280152475..e4cbddf6c 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -35,8 +35,8 @@
from celery.five import monotonic
from celery.signals import task_revoked
from celery.utils import uuid
-from celery.worker import job as module
-from celery.worker.job import Request, logger as req_logger
+from celery.worker import request as module
+from celery.worker.request import Request, logger as req_logger
from celery.worker.state import revoked
from celery.tests.case import (
@@ -247,7 +247,7 @@ def test_invalid_expires_raises_InvalidTaskError(self):
self.get_request(self.add.s(2, 2).set(expires='12345'))
def test_valid_expires_with_utc_makes_aware(self):
- with patch('celery.worker.job.maybe_make_aware') as mma:
+ with patch('celery.worker.request.maybe_make_aware') as mma:
self.get_request(self.add.s(2, 2).set(expires=10))
self.assertTrue(mma.called)
@@ -592,8 +592,8 @@ def test_from_message_invalid_kwargs(self):
with self.assertRaises(InvalidTaskError):
raise req.execute().exception
- @patch('celery.worker.job.error')
- @patch('celery.worker.job.warn')
+ @patch('celery.worker.request.error')
+ @patch('celery.worker.request.warn')
def test_on_timeout(self, warn, error):
job = self.xRequest()
diff --git a/celery/tests/worker/test_worker.py b/celery/tests/worker/test_worker.py
index e939a4e8b..1596c4616 100644
--- a/celery/tests/worker/test_worker.py
+++ b/celery/tests/worker/test_worker.py
@@ -24,7 +24,7 @@
from celery.worker import components
from celery.worker import consumer
from celery.worker.consumer import Consumer as __Consumer
-from celery.worker.job import Request
+from celery.worker.request import Request
from celery.utils import worker_direct
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
diff --git a/celery/worker/control.py b/celery/worker/control.py
index 2067d4043..b0bb93fde 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -22,8 +22,8 @@
from celery.utils import jsonify
from . import state as worker_state
+from .request import Request
from .state import revoked
-from .job import Request
__all__ = ['Panel']
DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit')
diff --git a/celery/worker/job.py b/celery/worker/request.py
similarity index 99%
rename from celery/worker/job.py
rename to celery/worker/request.py
index 0fd57f95a..949cc95a3 100644
--- a/celery/worker/job.py
+++ b/celery/worker/request.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
- celery.worker.job
- ~~~~~~~~~~~~~~~~~
+ celery.worker.request
+ ~~~~~~~~~~~~~~~~~~~~~
This module defines the :class:`Request` class,
which specifies how tasks are executed.
diff --git a/celery/worker/state.py b/celery/worker/state.py
index 8abaa5d73..f2f7a79fa 100644
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -42,10 +42,10 @@
#: being expired when the max limit has been exceeded.
REVOKE_EXPIRES = 10800
-#: set of all reserved :class:`~celery.worker.job.Request`'s.
+#: set of all reserved :class:`~celery.worker.request.Request`'s.
reserved_requests = set()
-#: set of currently active :class:`~celery.worker.job.Request`'s.
+#: set of currently active :class:`~celery.worker.request.Request`'s.
active_requests = set()
#: count of tasks accepted by the worker, sorted by type.
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 4f12741ee..397aa4f24 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -15,7 +15,7 @@
from celery.utils.log import get_logger
from celery.utils.timeutils import timezone
-from .job import Request, RequestV1
+from .request import Request, RequestV1
from .state import task_reserved
__all__ = ['default']
diff --git a/docs/internals/app-overview.rst b/docs/internals/app-overview.rst
index 33dd4e815..602f33d29 100644
--- a/docs/internals/app-overview.rst
+++ b/docs/internals/app-overview.rst
@@ -226,7 +226,7 @@ App Dependency Tree
* celery.apps.worker.Worker
* celery.worker.WorkerController
* celery.worker.consumer.Consumer
- * celery.worker.job.TaskRequest
+ * celery.worker.request.Request
* celery.events.EventDispatcher
* celery.worker.control.ControlDispatch
* celery.woker.control.registry.Panel
diff --git a/docs/internals/worker.rst b/docs/internals/worker.rst
index 30eb64185..c1695cb48 100644
--- a/docs/internals/worker.rst
+++ b/docs/internals/worker.rst
@@ -35,7 +35,7 @@ Receives messages from the broker using `Kombu`_.
.. _`Kombu`: http://pypi.python.org/pypi/kombu
When a message is received it's converted into a
-:class:`celery.worker.job.TaskRequest` object.
+:class:`celery.worker.request.Request` object.
Tasks with an ETA, or rate-limit are entered into the `timer`,
messages that can be immediately processed are sent to the execution pool.
diff --git a/docs/reference/celery.worker.job.rst b/docs/reference/celery.worker.request.rst
similarity index 57%
rename from docs/reference/celery.worker.job.rst
rename to docs/reference/celery.worker.request.rst
index 36fc1a7b3..8821d6bef 100644
--- a/docs/reference/celery.worker.job.rst
+++ b/docs/reference/celery.worker.request.rst
@@ -1,11 +1,11 @@
=====================================
- celery.worker.job
+ celery.worker.request
=====================================
.. contents::
:local:
-.. currentmodule:: celery.worker.job
+.. currentmodule:: celery.worker.request
-.. automodule:: celery.worker.job
+.. automodule:: celery.worker.request
:members:
:undoc-members:
diff --git a/docs/reference/index.rst b/docs/reference/index.rst
index 5f1c72a08..a8ae3ea02 100644
--- a/docs/reference/index.rst
+++ b/docs/reference/index.rst
@@ -47,7 +47,7 @@
celery.apps.beat
celery.worker
celery.worker.consumer
- celery.worker.job
+ celery.worker.request
celery.worker.state
celery.worker.strategy
celery.bin.base
diff --git a/docs/userguide/extending.rst b/docs/userguide/extending.rst
index 41878034f..a4417c78a 100644
--- a/docs/userguide/extending.rst
+++ b/docs/userguide/extending.rst
@@ -463,7 +463,7 @@ Methods
.. method:: apply_eta_task(request)
Schedule eta task to execute based on the ``request.eta`` attribute.
- (:class:`~celery.worker.job.Request`)
+ (:class:`~celery.worker.request.Request`)
diff --git a/docs/userguide/signals.rst b/docs/userguide/signals.rst
index 4d6d72e69..7b927472b 100644
--- a/docs/userguide/signals.rst
+++ b/docs/userguide/signals.rst
@@ -271,7 +271,7 @@ Provides arguments:
* request
- This is a :class:`~celery.worker.job.Request` instance, and not
+ This is a :class:`~celery.worker.request.Request` instance, and not
``task.request``. When using the prefork pool this signal
is dispatched in the parent process, so ``task.request`` is not available
and should not be used. Use this object instead, which should have many
diff --git a/docs/whatsnew-3.1.rst b/docs/whatsnew-3.1.rst
index 6ac166166..99b6c3ad3 100644
--- a/docs/whatsnew-3.1.rst
+++ b/docs/whatsnew-3.1.rst
@@ -1072,8 +1072,9 @@ In Other News
(Issue #1555).
The revoked signal is dispatched after the task request is removed from
- the stack, so it must instead use the :class:`~celery.worker.job.Request`
- object to get information about the task.
+ the stack, so it must instead use the
+ :class:`~celery.worker.request.Request` object to get information
+ about the task.
- Worker: New :option:`-X` command line argument to exclude queues
(Issue #1399).
@@ -1235,7 +1236,7 @@ Internal changes
- Result backends (:class:`celery.backends.base.BaseBackend`)
- :class:`celery.worker.WorkController`
- :class:`celery.worker.Consumer`
- - :class:`celery.worker.job.Request`
+ - :class:`celery.worker.request.Request`
This means that you have to pass a specific app when instantiating
these classes.
From 67e0dec844269d364d275e1c1c21546c249fcd3b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 9 May 2014 17:50:54 +0100
Subject: [PATCH 0111/1103] Removes module celery.task.trace as scheduled for
3.2
---
celery/task/trace.py | 12 ------------
extra/release/doc4allmods | 1 -
2 files changed, 13 deletions(-)
delete mode 100644 celery/task/trace.py
diff --git a/celery/task/trace.py b/celery/task/trace.py
deleted file mode 100644
index 5e5f5a8e9..000000000
--- a/celery/task/trace.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""This module has moved to celery.app.trace."""
-from __future__ import absolute_import
-
-import sys
-
-from celery.utils import warn_deprecated
-
-warn_deprecated('celery.task.trace', removal='3.2',
- alternative='Please use celery.app.trace instead.')
-
-from celery.app import trace
-sys.modules[__name__] = trace
diff --git a/extra/release/doc4allmods b/extra/release/doc4allmods
index b08b769eb..c36cb6273 100755
--- a/extra/release/doc4allmods
+++ b/extra/release/doc4allmods
@@ -7,7 +7,6 @@ SKIP_FILES="celery.five.rst
celery.task.rst
celery.task.base.rst
celery.task.sets.rst
- celery.task.trace.rst
celery.bin.rst
celery.bin.celeryd_detach.rst
celery.contrib.rst
From 2e1cad9e046feb79c0954157e05f3386e3f649d4 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 9 May 2014 18:01:37 +0100
Subject: [PATCH 0112/1103] Removes magic keyword arguments support
---
celery/_state.py | 4 +--
celery/app/base.py | 19 +++++---------
celery/app/builtins.py | 3 ---
celery/app/task.py | 26 +++----------------
celery/app/trace.py | 12 ++++-----
celery/app/utils.py | 1 -
celery/five.py | 9 +------
celery/task/base.py | 4 +--
celery/task/http.py | 3 +--
celery/tests/app/test_app.py | 4 +--
celery/tests/compat_modules/test_compat.py | 23 ----------------
.../tests/compat_modules/test_compat_utils.py | 4 ---
.../tests/compat_modules/test_decorators.py | 1 -
celery/tests/tasks/test_tasks.py | 4 ---
celery/tests/worker/test_request.py | 3 ---
celery/utils/__init__.py | 9 -------
16 files changed, 21 insertions(+), 108 deletions(-)
diff --git a/celery/_state.py b/celery/_state.py
index a76b3f4b7..7592ca242 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -77,10 +77,8 @@ def _get_current_app():
#: creates the global fallback app instance.
from celery.app import Celery
set_default_app(Celery(
- 'default',
+ 'default', fixups=[], set_as_current=False,
loader=os.environ.get('CELERY_LOADER') or 'default',
- fixups=[],
- set_as_current=False, accept_magic_kwargs=True,
))
return _tls.current_app or default_app
diff --git a/celery/app/base.py b/celery/app/base.py
index 02590025a..04b1749a2 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -127,11 +127,13 @@ class Celery(object):
#: Signal sent after app has been finalized.
on_after_finalize = None
+ #: ignored
+ accept_magic_kwargs = False
+
def __init__(self, main=None, loader=None, backend=None,
amqp=None, events=None, log=None, control=None,
- set_as_current=True, accept_magic_kwargs=False,
- tasks=None, broker=None, include=None, changes=None,
- config_source=None, fixups=None, task_cls=None,
+ set_as_current=True, tasks=None, broker=None, include=None,
+ changes=None, config_source=None, fixups=None, task_cls=None,
autofinalize=True, **kwargs):
self.clock = LamportClock()
self.main = main
@@ -144,7 +146,6 @@ def __init__(self, main=None, loader=None, backend=None,
self.task_cls = task_cls or self.task_cls
self.set_as_current = set_as_current
self.registry_cls = symbol_by_name(self.registry_cls)
- self.accept_magic_kwargs = accept_magic_kwargs
self.user_options = defaultdict(set)
self.steps = defaultdict(set)
self.autofinalize = autofinalize
@@ -239,12 +240,6 @@ def _create_task_cls(fun):
cons = lambda app: app._task_from_fun(fun, **opts)
cons.__name__ = fun.__name__
connect_on_app_finalize(cons)
- if self.accept_magic_kwargs: # compat mode
- task = self._task_from_fun(fun, **opts)
- if filter:
- task = filter(task)
- return task
-
if self.finalized or opts.get('_force_evaluate'):
ret = self._task_from_fun(fun, **opts)
else:
@@ -276,7 +271,6 @@ def _task_from_fun(self, fun, **options):
T = type(fun.__name__, (base, ), dict({
'app': self,
- 'accept_magic_kwargs': False,
'run': fun if bind else staticmethod(fun),
'_decorated': True,
'__doc__': fun.__doc__,
@@ -581,7 +575,6 @@ def __reduce_keys__(self):
'events': self.events_cls,
'log': self.log_cls,
'control': self.control_cls,
- 'accept_magic_kwargs': self.accept_magic_kwargs,
'fixups': self.fixups,
'config_source': self._config_source,
'task_cls': self.task_cls,
@@ -592,7 +585,7 @@ def __reduce_args__(self):
return (self.main, self.conf.changes,
self.loader_cls, self.backend_cls, self.amqp_cls,
self.events_cls, self.log_cls, self.control_cls,
- self.accept_magic_kwargs, self._config_source)
+ False, self._config_source)
@cached_property
def Worker(self):
diff --git a/celery/app/builtins.py b/celery/app/builtins.py
index 81d5f074c..0100cb86c 100644
--- a/celery/app/builtins.py
+++ b/celery/app/builtins.py
@@ -144,7 +144,6 @@ def add_group_task(app):
class Group(app.Task):
app = _app
name = 'celery.group'
- accept_magic_kwargs = False
_decorated = True
def run(self, tasks, result, group_id, partial_args,
@@ -172,7 +171,6 @@ def add_chain_task(app):
class Chain(app.Task):
app = _app
name = 'celery.chain'
- accept_magic_kwargs = False
_decorated = True
return Chain
@@ -188,7 +186,6 @@ def add_chord_task(app):
class Chord(app.Task):
app = _app
name = 'celery.chord'
- accept_magic_kwargs = False
ignore_result = False
_decorated = True
diff --git a/celery/app/task.py b/celery/app/task.py
index 705c26269..1fa6b3381 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -20,7 +20,7 @@
from celery.five import class_property, items, with_metaclass
from celery.local import Proxy
from celery.result import EagerResult
-from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise
+from celery.utils import gen_task_name, uuid, maybe_reraise
from celery.utils.functional import mattrgetter, maybe_list
from celery.utils.imports import instantiate
from celery.utils.mail import ErrorMail
@@ -237,10 +237,6 @@ class Task(object):
#: If :const:`True` the task is an abstract base class.
abstract = True
- #: If disabled the worker will not forward magic keyword arguments.
- #: Deprecated and scheduled for removal in v4.0.
- accept_magic_kwargs = False
-
#: Maximum number of retries before giving up. If set to :const:`None`,
#: it will **never** stop retrying.
max_retries = 3
@@ -345,6 +341,9 @@ class Task(object):
'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
)
+ #: ignored
+ accept_magic_kwargs = False
+
_backend = None # set by backend property.
__bound__ = False
@@ -362,8 +361,6 @@ def bind(self, app):
for attr_name, config_name in self.from_config:
if getattr(self, attr_name, None) is None:
setattr(self, attr_name, conf[config_name])
- if self.accept_magic_kwargs is None:
- self.accept_magic_kwargs = app.accept_magic_kwargs
# decorate with annotations from config.
if not was_bound:
@@ -720,21 +717,6 @@ def apply(self, args=None, kwargs=None,
'errbacks': maybe_list(link_error),
'headers': options.get('headers'),
'delivery_info': {'is_eager': True}}
- if self.accept_magic_kwargs:
- default_kwargs = {'task_name': task.name,
- 'task_id': task_id,
- 'task_retries': retries,
- 'task_is_eager': True,
- 'logfile': options.get('logfile'),
- 'loglevel': options.get('loglevel', 0),
- 'delivery_info': {'is_eager': True}}
- supported_keys = fun_takes_kwargs(task.run, default_kwargs)
- extend_with = {
- key: val for key, val in items(default_kwargs)
- if key in supported_keys
- }
- kwargs.update(extend_with)
-
tb = None
tracer = build_tracer(
task.name, task, eager=True,
diff --git a/celery/app/trace.py b/celery/app/trace.py
index c0523c3f2..c21ef1ee2 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -467,10 +467,11 @@ def _trace_task_ret(name, uuid, request, body, content_type,
trace_task_ret = _trace_task_ret
-def _fast_trace_task_v1(task, uuid, args, kwargs, request={}):
+def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
# setup_worker_optimizations will point trace_task_ret to here,
# so this is the function used in the worker.
- R, I, T, Rstr = _tasks[task].__trace__(uuid, args, kwargs, request)[0]
+ tasks, _ = _loc
+ R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)[0]
# exception instance if error, else result text
return (1, R, T) if I else (0, Rstr, T)
@@ -479,11 +480,8 @@ def _fast_trace_task(task, uuid, request, body, content_type,
content_encoding, loads=loads_message, _loc=_localized,
**extra_request):
tasks, accept = _loc
- try:
- args, kwargs = loads(body, content_type, content_encoding,
- accept=accept)
- except Exception as exc:
- print('OH NOEEES: %r' % (exc, ))
+ args, kwargs = loads(body, content_type, content_encoding,
+ accept=accept)
request.update(args=args, kwargs=kwargs, **extra_request)
R, I, T, Rstr = tasks[task].__trace__(
uuid, args, kwargs, request,
diff --git a/celery/app/utils.py b/celery/app/utils.py
index a409d8fac..d017de2a3 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -152,7 +152,6 @@ def build_standard_kwargs(self, main, changes, loader, backend, amqp,
return dict(main=main, loader=loader, backend=backend, amqp=amqp,
changes=changes, events=events, log=log, control=control,
set_as_current=False,
- accept_magic_kwargs=accept_magic_kwargs,
config_source=config_source)
def construct(self, cls, **kwargs):
diff --git a/celery/five.py b/celery/five.py
index 56c640ac8..b7fe25eac 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -210,15 +210,8 @@ def getappattr(path):
return current_app._rgetattr(path)
-def _compat_task_decorator(*args, **kwargs):
- from celery import current_app
- kwargs.setdefault('accept_magic_kwargs', True)
- return current_app.task(*args, **kwargs)
-
-
def _compat_periodic_task_decorator(*args, **kwargs):
from celery.task import periodic_task
- kwargs.setdefault('accept_magic_kwargs', True)
return periodic_task(*args, **kwargs)
@@ -228,7 +221,7 @@ def _compat_periodic_task_decorator(*args, **kwargs):
'send_task': 'send_task',
},
'decorators': {
- 'task': _compat_task_decorator,
+ 'task': 'task',
'periodic_task': _compat_periodic_task_decorator,
},
'log': {
diff --git a/celery/task/base.py b/celery/task/base.py
index 6feffc48d..a47885aeb 100644
--- a/celery/task/base.py
+++ b/celery/task/base.py
@@ -51,7 +51,6 @@ class Task(BaseTask):
priority = None
type = 'regular'
disable_error_emails = False
- accept_magic_kwargs = False
from_config = BaseTask.from_config + (
('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
@@ -178,8 +177,7 @@ def on_bound(cls, app):
def task(*args, **kwargs):
"""Deprecated decorator, please use :func:`celery.task`."""
- return current_app.task(*args, **dict({'accept_magic_kwargs': False,
- 'base': Task}, **kwargs))
+ return current_app.task(*args, **dict({'base': Task}, **kwargs))
def periodic_task(*args, **options):
diff --git a/celery/task/http.py b/celery/task/http.py
index 2c9d8604b..62b89b896 100644
--- a/celery/task/http.py
+++ b/celery/task/http.py
@@ -162,8 +162,7 @@ def http_headers(self):
return headers
-@shared_task(name='celery.http_dispatch', bind=True,
- url=None, method=None, accept_magic_kwargs=False)
+@shared_task(name='celery.http_dispatch', bind=True, url=None, method=None)
def dispatch(self, url=None, method='GET', **kwargs):
"""Task dispatching to an URL.
diff --git a/celery/tests/app/test_app.py b/celery/tests/app/test_app.py
index 5bb1ef61e..69187d0a0 100644
--- a/celery/tests/app/test_app.py
+++ b/celery/tests/app/test_app.py
@@ -258,7 +258,7 @@ def foo():
self.assertFalse(sh.called)
def test_task_compat_with_filter(self):
- with self.Celery(accept_magic_kwargs=True) as app:
+ with self.Celery() as app:
check = Mock()
def filter(task):
@@ -271,7 +271,7 @@ def foo():
check.assert_called_with(foo)
def test_task_with_filter(self):
- with self.Celery(accept_magic_kwargs=False) as app:
+ with self.Celery() as app:
check = Mock()
def filter(task):
diff --git a/celery/tests/compat_modules/test_compat.py b/celery/tests/compat_modules/test_compat.py
index d285188e0..58f0cea0c 100644
--- a/celery/tests/compat_modules/test_compat.py
+++ b/celery/tests/compat_modules/test_compat.py
@@ -15,29 +15,6 @@
from celery.tests.case import AppCase, depends_on_current_app
-class test_Task(AppCase):
-
- def test_base_task_inherits_magic_kwargs_from_app(self):
- from celery.task import Task as OldTask
-
- class timkX(OldTask):
- abstract = True
-
- with self.Celery(set_as_current=False,
- accept_magic_kwargs=True) as app:
- timkX.bind(app)
- # see #918
- self.assertFalse(timkX.accept_magic_kwargs)
-
- from celery import Task as NewTask
-
- class timkY(NewTask):
- abstract = True
-
- timkY.bind(app)
- self.assertFalse(timkY.accept_magic_kwargs)
-
-
@depends_on_current_app
class test_periodic_tasks(AppCase):
diff --git a/celery/tests/compat_modules/test_compat_utils.py b/celery/tests/compat_modules/test_compat_utils.py
index b041a0b3e..d1ef81a98 100644
--- a/celery/tests/compat_modules/test_compat_utils.py
+++ b/celery/tests/compat_modules/test_compat_utils.py
@@ -40,11 +40,7 @@ def test_decorators_task(self):
def _test_decorators_task():
pass
- self.assertTrue(_test_decorators_task.accept_magic_kwargs)
-
def test_decorators_periodic_task(self):
@celery.decorators.periodic_task(run_every=3600)
def _test_decorators_ptask():
pass
-
- self.assertTrue(_test_decorators_ptask.accept_magic_kwargs)
diff --git a/celery/tests/compat_modules/test_decorators.py b/celery/tests/compat_modules/test_decorators.py
index 9f5dff947..df95916ae 100644
--- a/celery/tests/compat_modules/test_decorators.py
+++ b/celery/tests/compat_modules/test_decorators.py
@@ -27,7 +27,6 @@ def setup(self):
def assertCompatDecorator(self, decorator, type, **opts):
task = decorator(**opts)(add)
self.assertEqual(task(8, 8), 16)
- self.assertTrue(task.accept_magic_kwargs)
self.assertIsInstance(task, type)
def test_task(self):
diff --git a/celery/tests/tasks/test_tasks.py b/celery/tests/tasks/test_tasks.py
index 5607c255d..6f11ba1be 100644
--- a/celery/tests/tasks/test_tasks.py
+++ b/celery/tests/tasks/test_tasks.py
@@ -363,10 +363,6 @@ def test_task_class_repr(self):
self.mytask.app.Task._app = None
self.assertIn('unbound', repr(self.mytask.app.Task, ))
- def test_bind_no_magic_kwargs(self):
- self.mytask.accept_magic_kwargs = None
- self.mytask.bind(self.mytask.app)
-
def test_annotate(self):
with patch('celery.app.task.resolve_all_annotations') as anno:
anno.return_value = [{'FOO': 'BAR'}]
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index e4cbddf6c..02f065df1 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -835,9 +835,6 @@ def apply_async(self, target, args=None, kwargs=None,
self.assertEqual(p.args[1], tid)
self.assertEqual(p.args[3], job.message.body)
- job.task.accept_magic_kwargs = False
- job.execute_using_pool(p)
-
def _test_on_failure(self, exception):
tid = uuid()
job = self.xRequest(id=tid, args=[4])
diff --git a/celery/utils/__init__.py b/celery/utils/__init__.py
index 7bf6c3137..a3264b8d6 100644
--- a/celery/utils/__init__.py
+++ b/celery/utils/__init__.py
@@ -19,7 +19,6 @@
from collections import Callable
from functools import partial, wraps
-from inspect import getargspec
from pprint import pprint
from kombu.entity import Exchange, Queue
@@ -189,14 +188,6 @@ def is_iterable(obj):
return True
-def fun_takes_kwargs(fun, kwlist=[]):
- # deprecated
- S = getattr(fun, 'argspec', getargspec(fun))
- if S.keywords is not None:
- return kwlist
- return [kw for kw in kwlist if kw in S.args]
-
-
def isatty(fh):
try:
return fh.isatty()
From 469fd49d619131cd620262e304a7e07984f47a01 Mon Sep 17 00:00:00 2001
From: Jonas Haag
Date: Mon, 12 May 2014 14:38:49 +0200
Subject: [PATCH 0113/1103] Clarify behavior of rate limits in docs
---
docs/userguide/tasks.rst | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/docs/userguide/tasks.rst b/docs/userguide/tasks.rst
index 8e7cb0739..24df1cdb9 100644
--- a/docs/userguide/tasks.rst
+++ b/docs/userguide/tasks.rst
@@ -514,10 +514,14 @@ General
If it is an integer or float, it is interpreted as "tasks per second".
The rate limits can be specified in seconds, minutes or hours
- by appending `"/s"`, `"/m"` or `"/h"` to the value.
- Example: `"100/m"` (hundred tasks a minute). Default is the
- :setting:`CELERY_DEFAULT_RATE_LIMIT` setting, which if not specified means
- rate limiting for tasks is disabled by default.
+ by appending `"/s"`, `"/m"` or `"/h"` to the value. Tasks will be evenly
+ distributed over the specified time frame.
+
+ Example: `"100/m"` (hundred tasks a minute). This will enforce a minimum
+ delay of 600ms between starting two tasks.
+
+ Default is the :setting:`CELERY_DEFAULT_RATE_LIMIT` setting,
+ which if not specified means rate limiting for tasks is disabled by default.
.. attribute:: Task.time_limit
From 924f3076f6fe6ad133217123a357f9c98e967366 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 13 May 2014 12:46:36 +0100
Subject: [PATCH 0114/1103] PromiseProxy: Only remove original object if
evaluation succeeded
---
celery/local.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/celery/local.py b/celery/local.py
index ada6e9381..e042fa0a0 100644
--- a/celery/local.py
+++ b/celery/local.py
@@ -251,7 +251,9 @@ def __evaluate__(self,
thing = Proxy._get_current_object(self)
object.__setattr__(self, '__thing', thing)
return thing
- finally:
+ except:
+ raise
+ else:
for attr in _clean:
try:
object.__delattr__(self, attr)
From 2d68f8ddace82c4e490465faba865cf27826603a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 14 May 2014 16:53:37 +0100
Subject: [PATCH 0115/1103] Fixes weird traceback issues with
connection_or_acquire + producer_or_acquire
---
celery/app/base.py | 32 ++++++++++++-----------------
celery/utils/objects.py | 45 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 58 insertions(+), 19 deletions(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index 04b1749a2..18c280a18 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -9,6 +9,7 @@
from __future__ import absolute_import
import os
+import sys
import threading
import warnings
@@ -37,7 +38,7 @@
from celery.utils.dispatch import Signal
from celery.utils.functional import first, maybe_list
from celery.utils.imports import instantiate, symbol_by_name
-from celery.utils.objects import mro_lookup
+from celery.utils.objects import FallbackContext, mro_lookup
from .annotations import prepare as prepare_annotations
from .defaults import DEFAULTS, find_deprecated_settings
@@ -406,27 +407,20 @@ def connection(self, hostname=None, userid=None, password=None,
)
broker_connection = connection
- @contextmanager
- def connection_or_acquire(self, connection=None, pool=True,
- *args, **kwargs):
- if connection:
- yield connection
- else:
- if pool:
- with self.pool.acquire(block=True) as connection:
- yield connection
- else:
- with self.connection() as connection:
- yield connection
+ def _acquire_connection(self, pool=True):
+ """Helper for :meth:`connection_or_acquire`."""
+ if pool:
+ return self.pool.acquire(block=True)
+ return self.connection()
+
+ def connection_or_acquire(self, connection=None, pool=True, *_, **__):
+ return FallbackContext(connection, self._acquire_connection, pool=pool)
default_connection = connection_or_acquire # XXX compat
- @contextmanager
def producer_or_acquire(self, producer=None):
- if producer:
- yield producer
- else:
- with self.amqp.producer_pool.acquire(block=True) as producer:
- yield producer
+ return FallbackContext(
+ producer, self.amqp.producer_pool.acquire, block=True,
+ )
default_producer = producer_or_acquire # XXX compat
def prepare_config(self, c):
diff --git a/celery/utils/objects.py b/celery/utils/objects.py
index b2ad646b3..c81550af2 100644
--- a/celery/utils/objects.py
+++ b/celery/utils/objects.py
@@ -35,3 +35,48 @@ def mro_lookup(cls, attr, stop=(), monkey_patched=[]):
return
if attr in node.__dict__:
return node
+
+
+class FallbackContext(object):
+ """The built-in ``@contextmanager`` utility does not work well
+ when wrapping other contexts, as the traceback is wrong when
+ the wrapped context raises.
+
+ This solves this problem and can be used instead of ``@contextmanager``
+ in this example::
+
+ @contextmanager
+ def connection_or_default_connection(connection=None):
+ if connection:
+ # user already has a connection, should not close
+ # after use
+ yield connection
+ else:
+ # must have new connection, and also close the connection
+ # after the block returns
+ with create_new_connection() as connection:
+ yield connection
+
+ This wrapper can be used instead for the above like this::
+
+ def connection_or_default_connection(connection=None):
+ return FallbackContext(connection, create_new_connection)
+
+ """
+
+ def __init__(self, provided, fallback, *fb_args, **fb_kwargs):
+ self.provided = provided
+ self.fallback = fallback
+ self.fb_args = fb_args
+ self.fb_kwargs = fb_kwargs
+ self._context = None
+
+ def __enter__(self):
+ if self.provided is not None:
+ return self.provided
+ context = self._context = self.fallback(*self.fb_args, **self.fb_kwargs).__enter__()
+ return context
+
+ def __exit__(self, *exc_info):
+ if self._context is not None:
+ return self._context.__exit__(*exc_info)
From 6c08d2acdf2cf1b4a583f594d11384f51e0c3aba Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 14 May 2014 16:53:37 +0100
Subject: [PATCH 0116/1103] Fixes weird traceback issues with
connection_or_acquire + producer_or_acquire
---
celery/app/base.py | 32 ++++++++++++-----------------
celery/utils/objects.py | 45 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 58 insertions(+), 19 deletions(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index b0079f7db..66367e6f7 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -9,6 +9,7 @@
from __future__ import absolute_import
import os
+import sys
import threading
import warnings
@@ -37,7 +38,7 @@
from celery.utils.dispatch import Signal
from celery.utils.functional import first, maybe_list
from celery.utils.imports import instantiate, symbol_by_name
-from celery.utils.objects import mro_lookup
+from celery.utils.objects import FallbackContext, mro_lookup
from .annotations import prepare as prepare_annotations
from .defaults import DEFAULTS, find_deprecated_settings
@@ -411,27 +412,20 @@ def connection(self, hostname=None, userid=None, password=None,
)
broker_connection = connection
- @contextmanager
- def connection_or_acquire(self, connection=None, pool=True,
- *args, **kwargs):
- if connection:
- yield connection
- else:
- if pool:
- with self.pool.acquire(block=True) as connection:
- yield connection
- else:
- with self.connection() as connection:
- yield connection
+ def _acquire_connection(self, pool=True):
+ """Helper for :meth:`connection_or_acquire`."""
+ if pool:
+ return self.pool.acquire(block=True)
+ return self.connection()
+
+ def connection_or_acquire(self, connection=None, pool=True, *_, **__):
+ return FallbackContext(connection, self._acquire_connection, pool=pool)
default_connection = connection_or_acquire # XXX compat
- @contextmanager
def producer_or_acquire(self, producer=None):
- if producer:
- yield producer
- else:
- with self.amqp.producer_pool.acquire(block=True) as producer:
- yield producer
+ return FallbackContext(
+ producer, self.amqp.producer_pool.acquire, block=True,
+ )
default_producer = producer_or_acquire # XXX compat
def prepare_config(self, c):
diff --git a/celery/utils/objects.py b/celery/utils/objects.py
index b2ad646b3..c81550af2 100644
--- a/celery/utils/objects.py
+++ b/celery/utils/objects.py
@@ -35,3 +35,48 @@ def mro_lookup(cls, attr, stop=(), monkey_patched=[]):
return
if attr in node.__dict__:
return node
+
+
+class FallbackContext(object):
+ """The built-in ``@contextmanager`` utility does not work well
+ when wrapping other contexts, as the traceback is wrong when
+ the wrapped context raises.
+
+ This solves this problem and can be used instead of ``@contextmanager``
+ in this example::
+
+ @contextmanager
+ def connection_or_default_connection(connection=None):
+ if connection:
+ # user already has a connection, should not close
+ # after use
+ yield connection
+ else:
+ # must have new connection, and also close the connection
+ # after the block returns
+ with create_new_connection() as connection:
+ yield connection
+
+ This wrapper can be used instead for the above like this::
+
+ def connection_or_default_connection(connection=None):
+ return FallbackContext(connection, create_new_connection)
+
+ """
+
+ def __init__(self, provided, fallback, *fb_args, **fb_kwargs):
+ self.provided = provided
+ self.fallback = fallback
+ self.fb_args = fb_args
+ self.fb_kwargs = fb_kwargs
+ self._context = None
+
+ def __enter__(self):
+ if self.provided is not None:
+ return self.provided
+ context = self._context = self.fallback(*self.fb_args, **self.fb_kwargs).__enter__()
+ return context
+
+ def __exit__(self, *exc_info):
+ if self._context is not None:
+ return self._context.__exit__(*exc_info)
From ca8b519222a13b11e3e4e5d05011b230ae548ad1 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 12:55:17 +0100
Subject: [PATCH 0117/1103] Time limit in message is now a list
---
celery/app/amqp.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index e8e801529..5f9813e73 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -306,7 +306,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'group': group_id,
'chord': chord,
'retries': retries,
- 'timelimit': (time_limit, soft_time_limit),
+ 'timelimit': [time_limit, soft_time_limit],
'root_id': root_id,
'parent_id': parent_id,
},
From c7c5fc4d2f9e3596d79dbc64e6c624edfddda373 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 13:23:59 +0100
Subject: [PATCH 0118/1103] Removes dead code
---
celery/canvas.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 16924eeba..2c2883bcf 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -609,9 +609,6 @@ def apply_async(self, args=(), kwargs=None, add_to_parent=True,
parent_task.add_trail(result)
return result
- return type(*type.prepare(dict(self.options, **options), tasks, args),
- add_to_parent=add_to_parent)
-
def apply(self, args=(), kwargs={}, **options):
app = self.app
if not self.tasks:
From 904e2af39b232c8b0358ba9e08248c2989bc4669 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 14:04:42 +0100
Subject: [PATCH 0119/1103] Exception sent by JSON serialized worker can be
read by pickle configured worker
---
celery/backends/base.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 41ce1ef17..aec6dd3f4 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -165,11 +165,11 @@ def prepare_exception(self, exc, serializer=None):
def exception_to_python(self, exc):
"""Convert serialized exception to Python exception."""
- if self.serializer in EXCEPTION_ABLE_CODECS:
- return get_pickled_exception(exc)
- elif not isinstance(exc, BaseException):
- return create_exception_cls(
+ if not isinstance(exc, BaseException):
+ exc = create_exception_cls(
from_utf8(exc['exc_type']), __name__)(exc['exc_message'])
+ if self.serializer in EXCEPTION_ABLE_CODECS:
+ exc = get_pickled_exception(exc)
return exc
def prepare_value(self, result):
From acee6680ce009688b5d497b245466e5f02d7afb2 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 14:06:52 +0100
Subject: [PATCH 0120/1103] Needs to copy buffer into bytes early so that
librabbitmq does not release the buffer
---
celery/concurrency/base.py | 1 +
celery/concurrency/solo.py | 1 +
celery/five.py | 8 ++++++++
celery/worker/consumer.py | 10 +---------
celery/worker/request.py | 7 +++----
celery/worker/strategy.py | 10 ++++++++--
6 files changed, 22 insertions(+), 15 deletions(-)
diff --git a/celery/concurrency/base.py b/celery/concurrency/base.py
index 24b39a1ee..4913ffb27 100644
--- a/celery/concurrency/base.py
+++ b/celery/concurrency/base.py
@@ -72,6 +72,7 @@ class BasePool(object):
uses_semaphore = False
task_join_will_block = True
+ body_can_be_buffer = False
def __init__(self, limit=None, putlocks=True,
forking_enable=True, callbacks_propagate=(), **options):
diff --git a/celery/concurrency/solo.py b/celery/concurrency/solo.py
index a2dc19970..a83f46219 100644
--- a/celery/concurrency/solo.py
+++ b/celery/concurrency/solo.py
@@ -17,6 +17,7 @@
class TaskPool(BasePool):
"""Solo task pool (blocking, inline, fast)."""
+ body_can_be_buffer = True
def __init__(self, *args, **kwargs):
super(TaskPool, self).__init__(*args, **kwargs)
diff --git a/celery/five.py b/celery/five.py
index b7fe25eac..bfe9ff0cd 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -28,6 +28,14 @@
def Counter(): # noqa
return defaultdict(int)
+try:
+ buffer_t = buffer
+except NameError: # pragma: no cover
+ # Py3 does not have buffer, but we only need isinstance.
+
+ class buffer_t(object): # noqa
+ pass
+
############## py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 3bb430c43..e4d741422 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -35,7 +35,7 @@
from celery.app.trace import build_tracer
from celery.canvas import signature
from celery.exceptions import InvalidTaskError
-from celery.five import items, values
+from celery.five import buffer_t, items, values
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.utils.text import truncate
@@ -44,14 +44,6 @@
from . import heartbeat, loops, pidbox
from .state import task_reserved, maybe_shutdown, revoked, reserved_requests
-try:
- buffer_t = buffer
-except NameError: # pragma: no cover
- # Py3 does not have buffer, but we only need isinstance.
-
- class buffer_t(object): # noqa
- pass
-
__all__ = [
'Consumer', 'Connection', 'Events', 'Heart', 'Control',
'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body',
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 949cc95a3..83aa4c36e 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -108,13 +108,13 @@ class Request(object):
def __init__(self, message, on_ack=noop,
hostname=None, eventer=None, app=None,
connection_errors=None, request_dict=None,
- task=None, on_reject=noop, **opts):
+ task=None, on_reject=noop, body=None, **opts):
headers = message.headers
self.app = app
self.message = message
name = self.name = headers['c_type']
self.id = headers['id']
- self.body = message.body
+ self.body = message.body if body is None else body
self.content_type = message.content_type
self.content_encoding = message.content_encoding
eta = headers.get('eta')
@@ -192,8 +192,7 @@ def execute_using_pool(self, pool, **kwargs):
soft_timeout = soft_timeout or task.soft_time_limit
result = pool.apply_async(
trace_task_ret,
- args=(self.name, task_id, self.request_dict,
- bytes(body) if isinstance(body, buffer) else body,
+ args=(self.name, task_id, self.request_dict, self.body,
self.content_type, self.content_encoding),
kwargs={'hostname': self.hostname, 'is_eager': False},
accept_callback=self.on_accepted,
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 397aa4f24..31c5050a8 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -12,6 +12,7 @@
from kombu.async.timer import to_timestamp
+from celery.five import buffer_t
from celery.utils.log import get_logger
from celery.utils.timeutils import timezone
@@ -25,7 +26,7 @@
def default(task, app, consumer,
info=logger.info, error=logger.error, task_reserved=task_reserved,
- to_system_tz=timezone.to_system):
+ to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t):
hostname = consumer.hostname
eventer = consumer.event_dispatcher
ReqV2 = Request
@@ -40,14 +41,19 @@ def default(task, app, consumer,
bucket = consumer.task_buckets[task.name]
handle = consumer.on_task_request
limit_task = consumer._limit_task
+ body_can_be_buffer = consumer.pool.body_can_be_buffer
def task_message_handler(message, body, ack, reject, callbacks,
to_timestamp=to_timestamp):
if body is None:
+ body = message.body
+ if not body_can_be_buffer:
+ body = bytes(body) if isinstance(body, buffer_t) else body
req = ReqV2(message,
on_ack=ack, on_reject=reject, app=app,
hostname=hostname, eventer=eventer, task=task,
- connection_errors=connection_errors)
+ connection_errors=connection_errors,
+ body=body)
else:
req = ReqV1(body,
on_ack=ack, on_reject=reject, app=app,
From 411b7f0919b11d0e95697ba3877b1bf2bbf70747 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 14:07:24 +0100
Subject: [PATCH 0121/1103] Worker now stores result for internal errors,
 including ContentDisallowed
---
celery/worker/request.py | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 83aa4c36e..bac39bb45 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -319,7 +319,7 @@ def on_success(self, failed__retval__runtime, **kwargs):
if failed:
if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)):
raise retval.exception
- return self.on_failure(retval)
+ return self.on_failure(retval, return_ok=True)
task_ready(self)
if self.task.acks_late:
@@ -339,7 +339,7 @@ def on_retry(self, exc_info):
exception=safe_repr(exc_info.exception.exc),
traceback=safe_str(exc_info.traceback))
- def on_failure(self, exc_info, send_failed_event=True):
+ def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
"""Handler called if the task raised an exception."""
task_ready(self)
@@ -358,14 +358,14 @@ def on_failure(self, exc_info, send_failed_event=True):
# These are special cases where the process would not have had
# time to write the result.
if self.store_errors:
- if isinstance(exc, WorkerLostError):
- self.task.backend.mark_as_failure(
- self.id, exc, request=self,
- )
- elif isinstance(exc, Terminated):
+ if isinstance(exc, Terminated):
self._announce_revoked(
'terminated', True, string(exc), False)
send_failed_event = False # already sent revoked event
+ elif isinstance(exc, WorkerLostError) or not return_ok:
+ self.task.backend.mark_as_failure(
+ self.id, exc, request=self,
+ )
# (acks_late) acknowledge after result stored.
if self.task.acks_late:
self.acknowledge()
@@ -377,6 +377,10 @@ def on_failure(self, exc_info, send_failed_event=True):
traceback=exc_info.traceback,
)
+ if not return_ok:
+ error('Task handler raised error: %r', exc,
+ exc_info=exc_info.exc_info)
+
def acknowledge(self):
"""Acknowledge task."""
if not self.acknowledged:
From a665094b0a25249ed1f7f2612acb48578dcf8bd7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 15:14:36 +0100
Subject: [PATCH 0122/1103] Tests passing
---
celery/local.py | 4 ++--
celery/tests/utils/test_local.py | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/celery/local.py b/celery/local.py
index e042fa0a0..1a10c2d8c 100644
--- a/celery/local.py
+++ b/celery/local.py
@@ -249,11 +249,10 @@ def __evaluate__(self,
'_Proxy__kwargs')):
try:
thing = Proxy._get_current_object(self)
- object.__setattr__(self, '__thing', thing)
- return thing
except:
raise
else:
+ object.__setattr__(self, '__thing', thing)
for attr in _clean:
try:
object.__delattr__(self, attr)
@@ -274,6 +273,7 @@ def __evaluate__(self,
object.__delattr__(self, '__pending__')
except AttributeError:
pass
+ return thing
def maybe_evaluate(obj):
diff --git a/celery/tests/utils/test_local.py b/celery/tests/utils/test_local.py
index 2b50efcda..67b44b221 100644
--- a/celery/tests/utils/test_local.py
+++ b/celery/tests/utils/test_local.py
@@ -341,6 +341,7 @@ def test_callbacks(self):
self.assertTrue(object.__getattribute__(p, '__pending__'))
self.assertTrue(repr(p))
+ self.assertTrue(p.__evaluated__())
with self.assertRaises(AttributeError):
object.__getattribute__(p, '__pending__')
cbA.assert_called_with(p)
From f838ace597fa335f30bf24a27c39e9759a791d1f Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 16:41:35 +0100
Subject: [PATCH 0123/1103] Now supports task message protocol 1.0 again
---
celery/app/amqp.py | 2 +-
celery/app/base.py | 2 -
celery/app/trace.py | 13 ++-
celery/tests/worker/test_loops.py | 2 +-
celery/utils/objects.py | 4 +-
celery/worker/consumer.py | 26 +++--
celery/worker/request.py | 143 ++++++++++++++++++----------
celery/worker/strategy.py | 53 +++++++----
docs/internals/protov2.rst | 12 +--
funtests/stress/stress/templates.py | 4 +
10 files changed, 171 insertions(+), 90 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 5f9813e73..84ced793c 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -296,7 +296,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
return task_message(
headers={
'lang': 'py',
- 'c_type': name,
+ 'task': name,
'id': task_id,
'eta': eta,
'expires': expires,
diff --git a/celery/app/base.py b/celery/app/base.py
index 18c280a18..c0174df93 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -9,12 +9,10 @@
from __future__ import absolute_import
import os
-import sys
import threading
import warnings
from collections import defaultdict, deque
-from contextlib import contextmanager
from copy import deepcopy
from operator import attrgetter
diff --git a/celery/app/trace.py b/celery/app/trace.py
index c21ef1ee2..ec63de83f 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -478,11 +478,16 @@ def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
def _fast_trace_task(task, uuid, request, body, content_type,
content_encoding, loads=loads_message, _loc=_localized,
- **extra_request):
+ hostname=None, **_):
tasks, accept = _loc
- args, kwargs = loads(body, content_type, content_encoding,
- accept=accept)
- request.update(args=args, kwargs=kwargs, **extra_request)
+ if content_type:
+ args, kwargs = loads(body, content_type, content_encoding,
+ accept=accept)
+ else:
+ args, kwargs = body
+ request.update({
+ 'args': args, 'kwargs': kwargs, 'hostname': hostname,
+ })
R, I, T, Rstr = tasks[task].__trace__(
uuid, args, kwargs, request,
)
diff --git a/celery/tests/worker/test_loops.py b/celery/tests/worker/test_loops.py
index 80edd393a..9673c5f7c 100644
--- a/celery/tests/worker/test_loops.py
+++ b/celery/tests/worker/test_loops.py
@@ -155,7 +155,7 @@ def test_on_task_received_executes_on_task_message(self):
def test_on_task_message_missing_name(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
- msg.headers.pop('c_type')
+ msg.headers.pop('task')
on_task(msg)
x.on_unknown_message.assert_called_with(((2, 2), {}), msg)
diff --git a/celery/utils/objects.py b/celery/utils/objects.py
index c81550af2..1555f9caf 100644
--- a/celery/utils/objects.py
+++ b/celery/utils/objects.py
@@ -74,7 +74,9 @@ def __init__(self, provided, fallback, *fb_args, **fb_kwargs):
def __enter__(self):
if self.provided is not None:
return self.provided
- context = self._context = self.fallback(*self.fb_args, **self.fb_kwargs).__enter__()
+ context = self._context = self.fallback(
+ *self.fb_args, **self.fb_kwargs
+ ).__enter__()
return context
def __exit__(self, *exc_info):
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index e4d741422..71cf7c635 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -445,24 +445,32 @@ def on_task_received(message):
# will defer deserializing the message body to the pool.
payload = None
try:
- type_ = message.headers['c_type'] # protocol v2
+ type_ = message.headers['task'] # protocol v2
except TypeError:
return on_unknown_message(None, message)
except KeyError:
payload = message.payload
try:
- type_ = payload['task'] # protocol v1
+ type_, payload = payload['task'], payload # protocol v1
except (TypeError, KeyError):
return on_unknown_message(payload, message)
try:
- strategies[type_](
- message, None,
- message.ack_log_error, message.reject_log_error, callbacks,
- )
+ strategy = strategies[type_]
except KeyError as exc:
- on_unknown_task(payload, message, exc)
- except InvalidTaskError as exc:
- on_invalid_task(payload, message, exc)
+ return on_unknown_task(payload, message, exc)
+ else:
+ try:
+ strategy(
+ message, payload, message.ack_log_error,
+ message.reject_log_error, callbacks,
+ )
+ except InvalidTaskError as exc:
+ return on_invalid_task(payload, message, exc)
+ except MemoryError:
+ raise
+ except Exception as exc:
+ # XXX handle as internal error?
+ return on_invalid_task(payload, message, exc)
return on_task_received
diff --git a/celery/worker/request.py b/celery/worker/request.py
index bac39bb45..41b1d765e 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -44,9 +44,6 @@
_does_info = False
_does_debug = False
-#: Max length of result representation
-RESULT_MAXLEN = 128
-
def __optimize__():
# this is also called by celery.app.trace.setup_worker_optimizations
@@ -65,75 +62,63 @@ def __optimize__():
task_ready = state.task_ready
revoked_tasks = state.revoked
-#: Use when no message object passed to :class:`Request`.
-DEFAULT_FIELDS = {
- 'headers': None,
- 'reply_to': None,
- 'correlation_id': None,
- 'delivery_info': {
- 'exchange': None,
- 'routing_key': None,
- 'priority': 0,
- 'redelivered': False,
- },
-}
-
-
-class RequestV1(object):
- if not IS_PYPY:
- __slots__ = (
- 'app', 'message', 'name', 'id', 'root_id', 'parent_id',
- 'on_ack', 'hostname', 'eventer', 'connection_errors', 'task',
- 'eta', 'expires', 'request_dict', 'acknowledged', 'on_reject',
- 'utc', 'time_start', 'worker_pid', '_already_revoked',
- '_terminate_on_ack', '_apply_result',
- '_tzlocal', '__weakref__', '__dict__',
- )
-
class Request(object):
"""A request for task execution."""
- utc = True
+ acknowledged = False
+ time_start = None
+ worker_pid = None
+ timeouts = (None, None)
+ _already_revoked = False
+ _terminate_on_ack = None
+ _apply_result = None
+ _tzlocal = None
+
if not IS_PYPY: # pragma: no cover
__slots__ = (
'app', 'name', 'id', 'on_ack', 'body',
'hostname', 'eventer', 'connection_errors', 'task', 'eta',
- 'expires', 'request_dict', 'acknowledged', 'on_reject',
- 'utc', 'time_start', 'worker_pid', 'timeouts',
+ 'expires', 'request_dict', 'on_reject', 'utc',
'content_type', 'content_encoding',
- '_already_revoked', '_terminate_on_ack', '_apply_result',
- '_tzlocal', '__weakref__', '__dict__',
+ '__weakref__', '__dict__',
)
def __init__(self, message, on_ack=noop,
hostname=None, eventer=None, app=None,
connection_errors=None, request_dict=None,
- task=None, on_reject=noop, body=None, **opts):
- headers = message.headers
+ task=None, on_reject=noop, body=None,
+ headers=None, decoded=False, utc=True,
+ maybe_make_aware=maybe_make_aware,
+ maybe_iso8601=maybe_iso8601, **opts):
+ if headers is None:
+ headers = message.headers
+ if body is None:
+ body = message.body
self.app = app
self.message = message
- name = self.name = headers['c_type']
+ self.body = body
+ self.utc = utc
+ if decoded:
+ self.content_type = self.content_encoding = None
+ else:
+ self.content_type, self.content_encoding = (
+ message.content_type, message.content_encoding,
+ )
+
+ name = self.name = headers['task']
self.id = headers['id']
- self.body = message.body if body is None else body
- self.content_type = message.content_type
- self.content_encoding = message.content_encoding
- eta = headers.get('eta')
- expires = headers.get('expires')
- self.timeouts = (headers['timeouts'] if 'timeouts' in headers
- else (None, None))
+ if 'timeouts' in headers:
+ self.timeouts = headers['timeouts']
self.on_ack = on_ack
self.on_reject = on_reject
self.hostname = hostname or socket.gethostname()
self.eventer = eventer
self.connection_errors = connection_errors or ()
self.task = task or self.app.tasks[name]
- self.acknowledged = self._already_revoked = False
- self.time_start = self.worker_pid = self._terminate_on_ack = None
- self._apply_result = None
- self._tzlocal = None
# timezone means the message is timezone-aware, and the only timezone
# supported at this point is UTC.
+ eta = headers.get('eta')
if eta is not None:
try:
eta = maybe_iso8601(eta)
@@ -143,6 +128,8 @@ def __init__(self, message, on_ack=noop,
self.eta = maybe_make_aware(eta, self.tzlocal)
else:
self.eta = None
+
+ expires = headers.get('expires')
if expires is not None:
try:
expires = maybe_iso8601(expires)
@@ -186,15 +173,13 @@ def execute_using_pool(self, pool, **kwargs):
if self.revoked():
raise TaskRevokedError(task_id)
- body = self.body
timeout, soft_timeout = self.timeouts
timeout = timeout or task.time_limit
soft_timeout = soft_timeout or task.soft_time_limit
result = pool.apply_async(
trace_task_ret,
args=(self.name, task_id, self.request_dict, self.body,
- self.content_type, self.content_encoding),
- kwargs={'hostname': self.hostname, 'is_eager': False},
+ self.content_type, self.content_encoding, self.hostname),
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
@@ -449,3 +434,61 @@ def reply_to(self):
def correlation_id(self):
# used similarly to reply_to
return self.request_dict['correlation_id']
+
+
+def create_request_cls(base, task, pool, hostname, eventer,
+ ref=ref, revoked_tasks=revoked_tasks,
+ task_ready=task_ready):
+ from celery.app.trace import trace_task_ret as trace
+ default_time_limit = task.time_limit
+ default_soft_time_limit = task.soft_time_limit
+ apply_async = pool.apply_async
+ acks_late = task.acks_late
+ std_kwargs = {'hostname': hostname, 'is_eager': False}
+ events = eventer and eventer.enabled
+
+ class Request(base):
+
+ def execute_using_pool(self, pool, **kwargs):
+ task_id = self.id
+ if (self.expires or task_id in revoked_tasks) and self.revoked():
+ raise TaskRevokedError(task_id)
+
+ timeout, soft_timeout = self.timeouts
+ timeout = timeout or default_time_limit
+ soft_timeout = soft_timeout or default_soft_time_limit
+ result = apply_async(
+ trace,
+ args=(self.name, task_id, self.request_dict, self.body,
+ self.content_type, self.content_encoding),
+ kwargs=std_kwargs,
+ accept_callback=self.on_accepted,
+ timeout_callback=self.on_timeout,
+ callback=self.on_success,
+ error_callback=self.on_failure,
+ soft_timeout=soft_timeout,
+ timeout=timeout,
+ correlation_id=task_id,
+ )
+ # cannot create weakref to None
+ self._apply_result = ref(result) if result is not None else result
+ return result
+
+ def on_success(self, failed__retval__runtime, **kwargs):
+ failed, retval, runtime = failed__retval__runtime
+ if failed:
+ if isinstance(retval.exception, (
+ SystemExit, KeyboardInterrupt)):
+ raise retval.exception
+ return self.on_failure(retval, return_ok=True)
+ task_ready(self)
+
+ if acks_late:
+ self.acknowledge()
+
+ if events:
+ self.send_event(
+ 'task-succeeded', result=retval, runtime=runtime,
+ )
+
+ return Request
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 31c5050a8..a4ed1cac2 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -12,11 +12,12 @@
from kombu.async.timer import to_timestamp
+from celery.exceptions import InvalidTaskError
from celery.five import buffer_t
from celery.utils.log import get_logger
from celery.utils.timeutils import timezone
-from .request import Request, RequestV1
+from .request import Request, create_request_cls
from .state import task_reserved
__all__ = ['default']
@@ -24,13 +25,31 @@
logger = get_logger(__name__)
+def proto1_to_proto2(message, body):
+ """Converts Task message protocol 1 arguments to protocol 2.
+
+ Returns tuple of ``(body, headers, already_decoded_status, utc)``
+
+ """
+ try:
+ args, kwargs = body['args'], body['kwargs']
+ kwargs.items
+ except KeyError:
+ raise InvalidTaskError('Message does not have args/kwargs')
+ except AttributeError:
+ raise InvalidTaskError(
+ 'Task keyword arguments must be a mapping',
+ )
+ body['headers'] = message.headers
+ return (args, kwargs), body, True, body.get('utc', True)
+
+
def default(task, app, consumer,
info=logger.info, error=logger.error, task_reserved=task_reserved,
- to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t):
+ to_system_tz=timezone.to_system, bytes=bytes, buffer_t=buffer_t,
+ proto1_to_proto2=proto1_to_proto2):
hostname = consumer.hostname
eventer = consumer.event_dispatcher
- ReqV2 = Request
- ReqV1 = RequestV1
connection_errors = consumer.connection_errors
_does_info = logger.isEnabledFor(logging.INFO)
events = eventer and eventer.enabled
@@ -42,25 +61,27 @@ def default(task, app, consumer,
handle = consumer.on_task_request
limit_task = consumer._limit_task
body_can_be_buffer = consumer.pool.body_can_be_buffer
+ Req = create_request_cls(Request, task, consumer.pool, hostname, eventer)
+
+ revoked_tasks = consumer.controller.state.revoked
def task_message_handler(message, body, ack, reject, callbacks,
to_timestamp=to_timestamp):
if body is None:
- body = message.body
+ body, headers, decoded, utc = (
+ message.body, message.headers, False, True,
+ )
if not body_can_be_buffer:
body = bytes(body) if isinstance(body, buffer_t) else body
- req = ReqV2(message,
- on_ack=ack, on_reject=reject, app=app,
- hostname=hostname, eventer=eventer, task=task,
- connection_errors=connection_errors,
- body=body)
else:
- req = ReqV1(body,
- on_ack=ack, on_reject=reject, app=app,
- hostname=hostname, eventer=eventer, task=task,
- connection_errors=connection_errors,
- message=message)
- if req.revoked():
+ body, headers, decoded, utc = proto1_to_proto2(message, body)
+ req = Req(
+ message,
+ on_ack=ack, on_reject=reject, app=app, hostname=hostname,
+ eventer=eventer, task=task, connection_errors=connection_errors,
+ body=body, headers=headers, decoded=decoded, utc=utc,
+ )
+ if (req.expires or req.id in revoked_tasks) and req.revoked():
return
if _does_info:
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
index 8bbdcc4ee..01b2e1325 100644
--- a/docs/internals/protov2.rst
+++ b/docs/internals/protov2.rst
@@ -28,9 +28,9 @@ Notes
- Java/C, etc. can use a thrift/protobuf document as the body
-- Dispatches to actor based on ``c_type``, ``c_meth`` headers
+- Dispatches to actor based on ``task``, ``meth`` headers
- ``c_meth`` is unused by python, but may be used in the future
+ ``meth`` is unused by python, but may be used in the future
to specify class+method pairs.
- Chain gains a dedicated field.
@@ -52,7 +52,7 @@ Notes
- ``root_id`` and ``parent_id`` fields helps keep track of workflows.
-- ``c_shadow`` lets you specify a different name for logs, monitors
+- ``shadow`` lets you specify a different name for logs, monitors
can be used for e.g. meta tasks that calls any function::
from celery.utils.imports import qualname
@@ -108,8 +108,8 @@ Definition
'parent_id': (uuid)parent_id,
# optional
- 'c_meth': (string)unused,
- 'c_shadow': (string)replace_name,
+ 'meth': (string)unused,
+ 'shadow': (string)replace_name,
'eta': (iso8601)eta,
'expires'; (iso8601)expires,
'callbacks': (list)Signature,
@@ -135,7 +135,7 @@ Example
message=json.dumps([[2, 2], {}]),
application_headers={
'lang': 'py',
- 'c_type': 'proj.tasks.add',
+ 'task': 'proj.tasks.add',
'chain': [
# reversed chain list
{'task': 'proj.tasks.add', 'args': (8, )},
diff --git a/funtests/stress/stress/templates.py b/funtests/stress/stress/templates.py
index 520481108..977cfec4d 100644
--- a/funtests/stress/stress/templates.py
+++ b/funtests/stress/stress/templates.py
@@ -125,3 +125,7 @@ class sqs(default):
BROKER_TRANSPORT_OPTIONS = {
'region': os.environ.get('AWS_REGION', 'us-east-1'),
}
+
+@template()
+class proto1(default):
+ CELERY_TASK_PROTOCOL = 1
From 7c372aaf8b191728aec2ae24fff8a0c097df24ff Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 15:14:36 +0100
Subject: [PATCH 0124/1103] Tests passing
---
celery/local.py | 4 ++--
celery/tests/utils/test_local.py | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/celery/local.py b/celery/local.py
index e042fa0a0..1a10c2d8c 100644
--- a/celery/local.py
+++ b/celery/local.py
@@ -249,11 +249,10 @@ def __evaluate__(self,
'_Proxy__kwargs')):
try:
thing = Proxy._get_current_object(self)
- object.__setattr__(self, '__thing', thing)
- return thing
except:
raise
else:
+ object.__setattr__(self, '__thing', thing)
for attr in _clean:
try:
object.__delattr__(self, attr)
@@ -274,6 +273,7 @@ def __evaluate__(self,
object.__delattr__(self, '__pending__')
except AttributeError:
pass
+ return thing
def maybe_evaluate(obj):
diff --git a/celery/tests/utils/test_local.py b/celery/tests/utils/test_local.py
index 2b50efcda..67b44b221 100644
--- a/celery/tests/utils/test_local.py
+++ b/celery/tests/utils/test_local.py
@@ -341,6 +341,7 @@ def test_callbacks(self):
self.assertTrue(object.__getattribute__(p, '__pending__'))
self.assertTrue(repr(p))
+ self.assertTrue(p.__evaluated__())
with self.assertRaises(AttributeError):
object.__getattribute__(p, '__pending__')
cbA.assert_called_with(p)
From dc8e04a4d1fbe8549d8b4f002cbf96220167a90b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 17:55:52 +0100
Subject: [PATCH 0125/1103] Tests passing
---
celery/tests/case.py | 2 +-
celery/tests/worker/test_loops.py | 2 +-
celery/tests/worker/test_request.py | 3 ++-
celery/tests/worker/test_strategy.py | 4 +++-
celery/tests/worker/test_worker.py | 29 ++++++++++++++++++++++++++--
5 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/celery/tests/case.py b/celery/tests/case.py
index 520e1f55b..77a2dbc5d 100644
--- a/celery/tests/case.py
+++ b/celery/tests/case.py
@@ -867,7 +867,7 @@ def TaskMessage(name, id=None, args=(), kwargs={}, **options):
message = Mock(name='TaskMessage-{0}'.format(id))
message.headers = {
'id': id,
- 'c_type': name,
+ 'task': name,
}
message.headers.update(options)
message.content_type, message.content_encoding, message.body = dumps(
diff --git a/celery/tests/worker/test_loops.py b/celery/tests/worker/test_loops.py
index 9673c5f7c..fd6c8046f 100644
--- a/celery/tests/worker/test_loops.py
+++ b/celery/tests/worker/test_loops.py
@@ -163,7 +163,7 @@ def test_on_task_not_registered(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
exc = strategy.side_effect = KeyError(self.add.name)
on_task(msg)
- x.on_unknown_task.assert_called_with(None, msg, exc)
+ x.on_invalid_task.assert_called_with(None, msg, exc)
def test_on_task_InvalidTaskError(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index 02f065df1..05aef9301 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -248,7 +248,8 @@ def test_invalid_expires_raises_InvalidTaskError(self):
def test_valid_expires_with_utc_makes_aware(self):
with patch('celery.worker.request.maybe_make_aware') as mma:
- self.get_request(self.add.s(2, 2).set(expires=10))
+ self.get_request(self.add.s(2, 2).set(expires=10),
+ maybe_make_aware=mma)
self.assertTrue(mma.called)
def test_maybe_expire_when_expires_is_None(self):
diff --git a/celery/tests/worker/test_strategy.py b/celery/tests/worker/test_strategy.py
index 87ae65006..6e34f3841 100644
--- a/celery/tests/worker/test_strategy.py
+++ b/celery/tests/worker/test_strategy.py
@@ -70,6 +70,7 @@ def _context(self, sig,
if limit:
bucket = TokenBucket(rate(limit), capacity=1)
consumer.task_buckets[sig.task] = bucket
+ consumer.controller.state.revoked = set()
consumer.disable_rate_limits = not rate_limits
consumer.event_dispatcher.enabled = events
s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved)
@@ -126,9 +127,10 @@ def test_when_rate_limited__limits_disabled(self):
def test_when_revoked(self):
task = self.add.s(2, 2)
task.freeze()
- state.revoked.add(task.id)
try:
with self._context(task) as C:
+ C.consumer.controller.state.revoked.add(task.id)
+ state.revoked.add(task.id)
C()
with self.assertRaises(ValueError):
C.get_request()
diff --git a/celery/tests/worker/test_worker.py b/celery/tests/worker/test_worker.py
index 1596c4616..f3a3e1494 100644
--- a/celery/tests/worker/test_worker.py
+++ b/celery/tests/worker/test_worker.py
@@ -153,7 +153,7 @@ def test_info(self):
l.connection = Mock()
l.connection.info.return_value = {'foo': 'bar'}
l.controller = l.app.WorkController()
- l.controller.pool = Mock()
+ l.pool = l.controller.pool = Mock()
l.controller.pool.info.return_value = [Mock(), Mock()]
l.controller.consumer = l
info = l.controller.stats()
@@ -167,6 +167,8 @@ def test_start_when_closed(self):
def test_connection(self):
l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.blueprint.start(l)
self.assertIsInstance(l.connection, Connection)
@@ -229,6 +231,8 @@ def test_receive_message_unknown(self, warn):
def test_receive_message_eta_OverflowError(self, to_timestamp):
to_timestamp.side_effect = OverflowError()
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.blueprint.state = RUN
l.steps.pop()
m = create_task_message(
@@ -251,6 +255,8 @@ def test_receive_message_InvalidTaskError(self, error):
l.blueprint.state = RUN
l.event_dispatcher = mock_event_dispatcher()
l.steps.pop()
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
m = create_task_message(
Mock(), self.foo_task.name,
args=(1, 2), kwargs='foobarbaz', id=1)
@@ -293,6 +299,8 @@ def _get_on_message(self, l):
def test_receieve_message(self):
l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.blueprint.state = RUN
l.event_dispatcher = mock_event_dispatcher()
m = create_task_message(
@@ -322,6 +330,8 @@ def loop(self, *args, **kwargs):
l = MockConsumer(self.buffer.put, timer=self.timer,
send_events=False, pool=BasePool(), app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.channel_errors = (KeyError, )
with self.assertRaises(KeyError):
l.start()
@@ -340,6 +350,8 @@ def loop(self, *args, **kwargs):
l = MockConsumer(self.buffer.put, timer=self.timer,
send_events=False, pool=BasePool(), app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.connection_errors = (KeyError, )
self.assertRaises(SyntaxError, l.start)
@@ -422,6 +434,8 @@ def test_ignore_errors(self):
def test_apply_eta_task(self):
from celery.worker import state
l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.qos = QoS(None, 10)
task = object()
@@ -433,6 +447,8 @@ def test_apply_eta_task(self):
def test_receieve_message_eta_isoformat(self):
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.blueprint.state = RUN
l.steps.pop()
m = create_task_message(
@@ -518,6 +534,8 @@ def test_receieve_message_not_registered(self):
@patch('celery.worker.consumer.logger')
def test_receieve_message_ack_raises(self, logger, warn):
l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.blueprint.state = RUN
channel = Mock()
m = create_task_message(
@@ -550,6 +568,8 @@ def pp(*args, **kwargs):
pp('TEST RECEIVE MESSAGE ETA')
pp('+CREATE MYKOMBUCONSUMER')
l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
pp('-CREATE MYKOMBUCONSUMER')
l.steps.pop()
l.event_dispatcher = mock_event_dispatcher()
@@ -732,6 +752,8 @@ def reset_connection(self):
init_callback = Mock()
l = _Consumer(self.buffer.put, timer=self.timer,
init_callback=init_callback, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.qos = _QoS()
@@ -754,6 +776,8 @@ def raises_KeyError(*args, **kwargs):
init_callback.reset_mock()
l = _Consumer(self.buffer.put, timer=self.timer, app=self.app,
send_events=False, init_callback=init_callback)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.qos = _QoS()
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
@@ -765,8 +789,9 @@ def raises_KeyError(*args, **kwargs):
def test_reset_connection_with_no_node(self):
l = Consumer(self.buffer.put, timer=self.timer, app=self.app)
+ l.controller = l.app.WorkController()
+ l.pool = l.controller.pool = Mock()
l.steps.pop()
- self.assertEqual(None, l.pool)
l.blueprint.start(l)
From e71f86d66e105914feb881708cd317a28abf7113 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 15 May 2014 18:01:15 +0100
Subject: [PATCH 0126/1103] 3.2.0a2
---
celery/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/__init__.py b/celery/__init__.py
index 86a3e450f..da44bf4ea 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -14,7 +14,7 @@
)
SERIES = 'DEV'
-VERSION = version_info_t(3, 2, 0, 'a1', '')
+VERSION = version_info_t(3, 2, 0, 'a2', '')
__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION)
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
From 5f8cfd78305b3056aaa6b42f068b60d8024a5dfe Mon Sep 17 00:00:00 2001
From: Alexey Kotlyarov
Date: Fri, 16 May 2014 10:17:14 +1000
Subject: [PATCH 0127/1103] Make empty ResultSet support get()
---
celery/result.py | 2 +-
celery/tests/tasks/test_result.py | 6 ++++++
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/celery/result.py b/celery/result.py
index eb7364a84..34943a76d 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -718,7 +718,7 @@ def subtasks(self):
@property
def supports_native_join(self):
- return self.results[0].supports_native_join
+ return len(self.results) > 0 and self.results[0].supports_native_join
@property
def backend(self):
diff --git a/celery/tests/tasks/test_result.py b/celery/tests/tasks/test_result.py
index ee3c9bb1a..37692d00e 100644
--- a/celery/tests/tasks/test_result.py
+++ b/celery/tests/tasks/test_result.py
@@ -276,6 +276,12 @@ def test_get(self):
x.get()
self.assertTrue(x.join_native.called)
+ def test_get_empty(self):
+ x = self.app.ResultSet([])
+ x.join = Mock()
+ x.get()
+ self.assertTrue(x.join.called)
+
def test_add(self):
x = self.app.ResultSet([1])
x.add(2)
From bd4dc6fb35d580b2a8da797beefbcf7e34a28b89 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Fri, 16 May 2014 14:07:17 +0100
Subject: [PATCH 0128/1103] Cosmetics for #2041
---
celery/result.py | 5 ++++-
celery/tests/tasks/test_result.py | 3 ++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/celery/result.py b/celery/result.py
index 34943a76d..9bdd7d4f7 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -718,7 +718,10 @@ def subtasks(self):
@property
def supports_native_join(self):
- return len(self.results) > 0 and self.results[0].supports_native_join
+ try:
+ return self.results[0].supports_native_join
+ except IndexError:
+ pass
@property
def backend(self):
diff --git a/celery/tests/tasks/test_result.py b/celery/tests/tasks/test_result.py
index 37692d00e..61bf09dfe 100644
--- a/celery/tests/tasks/test_result.py
+++ b/celery/tests/tasks/test_result.py
@@ -278,7 +278,8 @@ def test_get(self):
def test_get_empty(self):
x = self.app.ResultSet([])
- x.join = Mock()
+ self.assertIsNone(x.supports_native_join)
+ x.join = Mock(name='join')
x.get()
self.assertTrue(x.join.called)
From 5a508638faf5dded9c9bb261090a3de6dbaf8bbd Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 13:54:15 +0100
Subject: [PATCH 0129/1103] Fixes duplicate nodename warning. Closes #2046
---
celery/app/control.py | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/celery/app/control.py b/celery/app/control.py
index 34076df0e..284537493 100644
--- a/celery/app/control.py
+++ b/celery/app/control.py
@@ -15,26 +15,27 @@
from kombu.utils import cached_property
from celery.exceptions import DuplicateNodenameWarning
+from celery.utils.text import pluralize
__all__ = ['Inspect', 'Control', 'flatten_reply']
W_DUPNODE = """\
-Received multiple replies from node name {0!r}.
+Received multiple replies from node {0}: {1}.
Please make sure you give each node a unique nodename using the `-n` option.\
"""
def flatten_reply(reply):
- nodes = {}
- seen = set()
+ nodes, dupes = {}, set()
for item in reply:
- dup = next((nodename in seen for nodename in item), None)
- if dup:
- warnings.warn(DuplicateNodenameWarning(
- W_DUPNODE.format(dup),
- ))
- seen.update(item)
+ [dupes.add(name) for name in item if name in nodes]
nodes.update(item)
+ if dupes:
+ warnings.warn(DuplicateNodenameWarning(
+ W_DUPNODE.format(
+ pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
+ ),
+ ))
return nodes
From e08012f3ae00f3e94887390d651081cd1ea3a088 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 14:15:10 +0100
Subject: [PATCH 0130/1103] Worker --detach default workdir is now CWD
---
celery/bin/celeryd_detach.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/celeryd_detach.py b/celery/bin/celeryd_detach.py
index 12e1f6497..0e88d6052 100644
--- a/celery/bin/celeryd_detach.py
+++ b/celery/bin/celeryd_detach.py
@@ -30,7 +30,7 @@
C_FAKEFORK = os.environ.get('C_FAKEFORK')
OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
- Option('--workdir', default='/', dest='working_directory'),
+ Option('--workdir', default=None, dest='working_directory'),
Option('--fake',
default=False, action='store_true', dest='fake',
help="Don't fork (for debugging purposes)"),
From b345094151dc85bdf35d2e8cf65214be9246a07e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 14:15:47 +0100
Subject: [PATCH 0131/1103] Worker --detach with C_FAKEFORK no longer closes
open fds. Closes #2044
---
celery/platforms.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/celery/platforms.py b/celery/platforms.py
index c4013b578..11efd7bcd 100644
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -317,9 +317,10 @@ def open(self):
if self.after_chdir:
self.after_chdir()
- close_open_fds(self.stdfds)
- for fd in self.stdfds:
- self.redirect_to_null(maybe_fileno(fd))
+ if not self.fake:
+ close_open_fds(self.stdfds)
+ for fd in self.stdfds:
+ self.redirect_to_null(maybe_fileno(fd))
self._is_open = True
__enter__ = open
From 5a4cf8b7d02618ae16e22812cd44ca1f5c576e2c Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 14:29:24 +0100
Subject: [PATCH 0132/1103] Did not log for Ignore and Reject
---
celery/app/trace.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/celery/app/trace.py b/celery/app/trace.py
index ec63de83f..d06ed9f48 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -150,6 +150,12 @@ def handle_error_state(self, task, eager=False):
FAILURE: self.handle_failure,
}[self.state](task, store_errors=store_errors)
+ def handle_reject(self, task, **kwargs):
+ self._log_error(task, ExceptionInfo())
+
+ def handle_ignore(self, task, **kwargs):
+ self._log_error(task, ExceptionInfo())
+
def handle_retry(self, task, store_errors=True):
"""Handle retry exception."""
# the exception raised is the Retry semi-predicate,
@@ -353,9 +359,11 @@ def trace_task(uuid, args, kwargs, request=None):
except Reject as exc:
I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
+ I.handle_reject(task)
except Ignore as exc:
I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
+ I.handle_ignore(task)
except Retry as exc:
I, R, state, retval = on_error(
task_request, exc, uuid, RETRY, call_errbacks=False,
From 8c56d527857fb05e8517251a3d1eeb4ecbd56fc4 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 14:45:01 +0100
Subject: [PATCH 0133/1103] Events are now transient by default
---
celery/events/__init__.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/celery/events/__init__.py b/celery/events/__init__.py
index b4ca9045c..a4142f76a 100644
--- a/celery/events/__init__.py
+++ b/celery/events/__init__.py
@@ -124,7 +124,7 @@ class EventDispatcher(object):
def __init__(self, connection=None, hostname=None, enabled=True,
channel=None, buffer_while_offline=True, app=None,
- serializer=None, groups=None):
+ serializer=None, groups=None, delivery_mode=1):
self.app = app_or_default(app or self.app)
self.connection = connection
self.channel = channel
@@ -139,6 +139,7 @@ def __init__(self, connection=None, hostname=None, enabled=True,
self.groups = set(groups or [])
self.tzoffset = [-time.timezone, -time.altzone]
self.clock = self.app.clock
+ self.delivery_mode = delivery_mode
if not connection and channel:
self.connection = channel.connection.client
self.enabled = enabled
@@ -213,6 +214,7 @@ def publish(self, type, fields, producer, retry=False,
declare=[exchange],
serializer=self.serializer,
headers=self.headers,
+ delivery_mode=self.delivery_mode,
)
def send(self, type, blind=False, **fields):
From ee0016bf1f024dc880ea2eb108ea25855010e2db Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 14:50:25 +0100
Subject: [PATCH 0134/1103] Stupid zsh is messing with our beloved ksh heritage
:chicken:
Closes #2038
---
docs/includes/installation.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/includes/installation.txt b/docs/includes/installation.txt
index 54ec954b0..ffc9edef0 100644
--- a/docs/includes/installation.txt
+++ b/docs/includes/installation.txt
@@ -28,9 +28,9 @@ commas.
.. code-block:: bash
- $ pip install celery[librabbitmq]
+ $ pip install "celery[librabbitmq]"
- $ pip install celery[librabbitmq,redis,auth,msgpack]
+ $ pip install "celery[librabbitmq,redis,auth,msgpack]"
The following bundles are available:
From 961429539e620180a7c457db160735fcd6d6dad7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 16:23:02 +0100
Subject: [PATCH 0135/1103] Task proto2: Custom headers were ignored
---
celery/app/amqp.py | 10 ++++++----
docs/userguide/signals.rst | 27 ++++++++++++++++++++-------
2 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 84ced793c..4a4fbc15c 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -417,7 +417,9 @@ def publish_task(producer, name, message,
compression=None, declare=None,
headers=None, **kwargs):
retry = default_retry if retry is None else retry
- headers, properties, body, sent_event = message
+ headers2, properties, body, sent_event = message
+ if headers:
+ headers2.update(headers)
if kwargs:
properties.update(kwargs)
@@ -448,7 +450,7 @@ def publish_task(producer, name, message,
send_before_publish(
sender=name, body=body,
exchange=exchange, routing_key=routing_key,
- declare=declare, headers=headers,
+ declare=declare, headers=headers2,
properties=kwargs, retry_policy=retry_policy,
)
ret = producer.publish(
@@ -459,11 +461,11 @@ def publish_task(producer, name, message,
compression=compression or default_compressor,
retry=retry, retry_policy=_rp,
delivery_mode=delivery_mode, declare=declare,
- headers=headers,
+ headers=headers2,
**properties
)
if after_receivers:
- send_after_publish(sender=name, body=body,
+ send_after_publish(sender=name, body=body, headers=headers2,
exchange=exchange, routing_key=routing_key)
if sent_receivers: # XXX deprecated
send_task_sent(sender=name, task_id=body['id'], task=name,
diff --git a/docs/userguide/signals.rst b/docs/userguide/signals.rst
index 7b927472b..00dab2dd9 100644
--- a/docs/userguide/signals.rst
+++ b/docs/userguide/signals.rst
@@ -28,9 +28,12 @@ Example connecting to the :signal:`after_task_publish` signal:
from celery.signals import after_task_publish
@after_task_publish.connect
- def task_sent_handler(sender=None, body=None, **kwargs):
- print('after_task_publish for task id {body[id]}'.format(
- body=body,
+ def task_sent_handler(sender=None, headers=None, body=None, **kwargs):
+ # information about the task is located in the headers for task messages
+ # using the task protocol version 2.
+ info = headers if 'task' in headers else body
+ print('after_task_publish for task id {info[id]}'.format(
+ info=info,
))
@@ -44,9 +47,12 @@ is published:
.. code-block:: python
@after_task_publish.connect(sender='proj.tasks.add')
- def task_sent_handler(sender=None, body=None, **kwargs):
- print('after_task_publish for task id {body[id]}'.format(
- body=body,
+ def task_sent_handler(sender=None, headers=None, body=None, **kwargs):
+ # information about the task is located in the headers for task messages
+ # using the task protocol version 2.
+ info = headers if 'task' in headers else body
+ print('after_task_publish for task id {info[id]}'.format(
+ info=info,
))
Signals use the same implementation as django.core.dispatch. As a result other
@@ -123,9 +129,16 @@ Sender is the name of the task being sent.
Provides arguments:
+* headers
+
+ The task message headers, see :ref:`task-message-protocol-v2`
+ and :ref:`task-message-protocol-v1`
+ for a reference of possible fields that can be defined.
+
* body
- The task message body, see :ref:`task-message-protocol-v1`
+ The task message body, see :ref:`task-message-protocol-v2`
+ and :ref:`task-message-protocol-v1`
for a reference of possible fields that can be defined.
* exchange
From fbbef2723ad4481f4a80e6bd72a03d3836ebf2b8 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 16:24:08 +0100
Subject: [PATCH 0136/1103] Canvas: Makes sure group() in a workflow is not
applied as a "celery.group" task
---
celery/canvas.py | 19 +++++++++++++++----
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 2c2883bcf..ae98fa4e1 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -570,9 +570,19 @@ def from_dict(self, d, app=None):
task['args'] = task._merge(d['args'])[0]
return group(tasks, app=app, **kwdict(d['options']))
- def _prepared(self, tasks, partial_args, group_id, root_id):
+ def _prepared(self, tasks, partial_args, group_id, root_id,
+ dict=dict, Signature=Signature, from_dict=Signature.from_dict):
for task in tasks:
- task = task.clone(partial_args)
+ if isinstance(task, dict):
+ if isinstance(task, Signature):
+ # local sigs are always of type Signature, and we
+ # clone them to make sure we do not modify the originals.
+ task = task.clone()
+ else:
+ # serialized sigs must be converted to Signature.
+ task = from_dict(task)
+ if partial_args and not task.immutable:
+ task.args = tuple(partial_args) + tuple(task.args)
yield task, task.freeze(group_id=group_id, root_id=root_id)
def _apply_tasks(self, tasks, producer=None, app=None, **options):
@@ -792,7 +802,7 @@ def __repr__(self):
def signature(varies, *args, **kwargs):
- if not (args or kwargs) and isinstance(varies, dict):
+ if isinstance(varies, dict):
if isinstance(varies, Signature):
return varies.clone()
return Signature.from_dict(varies)
@@ -804,9 +814,10 @@ def maybe_signature(d, app=None):
if d is not None:
if isinstance(d, dict):
if not isinstance(d, Signature):
- return signature(d, app=app)
+ d = signature(d)
elif isinstance(d, list):
return [maybe_signature(s, app=app) for s in d]
+
if app is not None:
d._app = app
return d
From 6ed09aaccf6332e90251228638683ce6a92f5026 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 18:53:37 +0100
Subject: [PATCH 0137/1103] Import print_function in every module using print
---
celery/__init__.py | 2 +-
celery/__main__.py | 2 +-
celery/app/__init__.py | 2 +-
celery/apps/beat.py | 2 +-
celery/events/snapshot.py | 2 +-
celery/tests/worker/test_request.py | 1 -
celery/utils/timer2.py | 2 +-
celery/utils/timeutils.py | 2 +-
celery/worker/control.py | 2 +-
celery/worker/state.py | 2 +-
10 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/celery/__init__.py b/celery/__init__.py
index da44bf4ea..6ec3a4e77 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -5,7 +5,7 @@
# :copyright: (c) 2012-2013 GoPivotal, Inc., All rights reserved.
# :license: BSD (3 Clause), see LICENSE for more details.
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function, unicode_literals
from collections import namedtuple
diff --git a/celery/__main__.py b/celery/__main__.py
index 04448e239..ba98e0a8a 100644
--- a/celery/__main__.py
+++ b/celery/__main__.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import sys
diff --git a/celery/app/__init__.py b/celery/app/__init__.py
index 952a8746d..3c690fb41 100644
--- a/celery/app/__init__.py
+++ b/celery/app/__init__.py
@@ -6,7 +6,7 @@
Celery Application.
"""
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import os
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index 46cef9b8b..a3c278c86 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -10,7 +10,7 @@
and so on.
"""
-from __future__ import absolute_import, unicode_literals
+from __future__ import absolute_import, print_statement, unicode_literals
import numbers
import socket
diff --git a/celery/events/snapshot.py b/celery/events/snapshot.py
index 0dd41554c..a202a70f3 100644
--- a/celery/events/snapshot.py
+++ b/celery/events/snapshot.py
@@ -10,7 +10,7 @@
in :mod:`djcelery.snapshots` in the `django-celery` distribution.
"""
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
from kombu.utils.limits import TokenBucket
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index 05aef9301..2700d26ef 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -805,7 +805,6 @@ def test_execute_fail(self):
args=[4],
kwargs={},
)
- print(job.execute())
self.assertIsInstance(job.execute(), ExceptionInfo)
meta = self.mytask_raising.backend.get_task_meta(tid)
self.assertEqual(meta['status'], states.FAILURE)
diff --git a/celery/utils/timer2.py b/celery/utils/timer2.py
index e42660c23..fdac90803 100644
--- a/celery/utils/timer2.py
+++ b/celery/utils/timer2.py
@@ -6,7 +6,7 @@
Scheduler for Python functions.
"""
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import os
import sys
diff --git a/celery/utils/timeutils.py b/celery/utils/timeutils.py
index 5b75b83a8..bd0b066e6 100644
--- a/celery/utils/timeutils.py
+++ b/celery/utils/timeutils.py
@@ -6,7 +6,7 @@
This module contains various utilities related to dates and times.
"""
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import numbers
import os
diff --git a/celery/worker/control.py b/celery/worker/control.py
index b0bb93fde..d0b119d85 100644
--- a/celery/worker/control.py
+++ b/celery/worker/control.py
@@ -227,7 +227,7 @@ def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover
import objgraph
except ImportError:
raise ImportError('Requires the objgraph library')
- print('Dumping graph for type %r' % (type, ))
+ logger.info('Dumping graph for type %r', type)
with tempfile.NamedTemporaryFile(prefix='cobjg',
suffix='.png', delete=False) as fh:
objects = objgraph.by_type(type)[:num]
diff --git a/celery/worker/state.py b/celery/worker/state.py
index f2f7a79fa..3e1ab95c5 100644
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -9,7 +9,7 @@
statistics, and revoked tasks.
"""
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
import os
import sys
From 90c0ee87456aa5cc7381775e6a3f6f3ed7f39c2b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 19:15:08 +0100
Subject: [PATCH 0138/1103] flakes
---
celery/canvas.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index ae98fa4e1..f419cabb3 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -570,8 +570,8 @@ def from_dict(self, d, app=None):
task['args'] = task._merge(d['args'])[0]
return group(tasks, app=app, **kwdict(d['options']))
- def _prepared(self, tasks, partial_args, group_id, root_id,
- dict=dict, Signature=Signature, from_dict=Signature.from_dict):
+ def _prepared(self, tasks, partial_args, group_id, root_id, dict=dict,
+ Signature=Signature, from_dict=Signature.from_dict):
for task in tasks:
if isinstance(task, dict):
if isinstance(task, Signature):
From d6ae1a2be401a3bfd53cd1e68ee0a1226f8848e3 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 20:04:22 +0100
Subject: [PATCH 0139/1103] Fix test for working_directory
---
celery/tests/bin/test_celeryd_detach.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/tests/bin/test_celeryd_detach.py b/celery/tests/bin/test_celeryd_detach.py
index 000d2f633..964aa0b77 100644
--- a/celery/tests/bin/test_celeryd_detach.py
+++ b/celery/tests/bin/test_celeryd_detach.py
@@ -85,7 +85,7 @@ def test_execute_from_commandline(self, detach, exit):
detach.assert_called_with(
path=x.execv_path, uid=None, gid=None,
umask=0, fake=False, logfile='/var/log', pidfile='celeryd.pid',
- working_directory='/',
+ working_directory=None,
argv=x.execv_argv + [
'-c', '1', '-lDEBUG',
'--logfile=/var/log', '--pidfile=celeryd.pid',
From b6e49175ce4df301bcb2c97b1bfe5e2d3a535245 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 20:23:37 +0100
Subject: [PATCH 0140/1103] beat --detached now runs after forkers
---
celery/bin/celeryd_detach.py | 3 ++-
celery/platforms.py | 7 ++++++-
celery/tests/bin/test_celeryd_detach.py | 6 ++++--
3 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/celery/bin/celeryd_detach.py b/celery/bin/celeryd_detach.py
index 0e88d6052..8a6d339d4 100644
--- a/celery/bin/celeryd_detach.py
+++ b/celery/bin/celeryd_detach.py
@@ -40,7 +40,8 @@
def detach(path, argv, logfile=None, pidfile=None, uid=None,
gid=None, umask=0, working_directory=None, fake=False, app=None):
fake = 1 if C_FAKEFORK else fake
- with detached(logfile, pidfile, uid, gid, umask, working_directory, fake):
+ with detached(logfile, pidfile, uid, gid, umask, working_directory, fake,
+ after_forkers=False):
try:
os.execv(path, [path] + argv)
except Exception:
diff --git a/celery/platforms.py b/celery/platforms.py
index 11efd7bcd..651b8f5fa 100644
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -36,6 +36,7 @@
resource = try_import('resource')
pwd = try_import('pwd')
grp = try_import('grp')
+mputil = try_import('multiprocessing.util')
__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM',
'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed',
@@ -294,11 +295,13 @@ class DaemonContext(object):
_is_open = False
def __init__(self, pidfile=None, workdir=None, umask=None,
- fake=False, after_chdir=None, **kwargs):
+ fake=False, after_chdir=None, after_forkers=True,
+ **kwargs):
self.workdir = workdir or DAEMON_WORKDIR
self.umask = DAEMON_UMASK if umask is None else umask
self.fake = fake
self.after_chdir = after_chdir
+ self.after_forkers = after_forkers
self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
def redirect_to_null(self, fd):
@@ -321,6 +324,8 @@ def open(self):
close_open_fds(self.stdfds)
for fd in self.stdfds:
self.redirect_to_null(maybe_fileno(fd))
+ if self.after_forkers and mputil is not None:
+ mputil._run_after_forkers()
self._is_open = True
__enter__ = open
diff --git a/celery/tests/bin/test_celeryd_detach.py b/celery/tests/bin/test_celeryd_detach.py
index 964aa0b77..9e1b4a04d 100644
--- a/celery/tests/bin/test_celeryd_detach.py
+++ b/celery/tests/bin/test_celeryd_detach.py
@@ -24,8 +24,10 @@ def test_execs(self, setup_logs, logger, execv, detached):
detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log',
pidfile='/var/pid')
- detached.assert_called_with('/var/log', '/var/pid', None, None, 0,
- None, False)
+ detached.assert_called_with(
+ '/var/log', '/var/pid', None, None, 0, None, False,
+ after_forkers=False,
+ )
execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c'])
execv.side_effect = Exception('foo')
From 9b8e2cbd50086020abce1cb7157e8825f6778a79 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 20:47:30 +0100
Subject: [PATCH 0141/1103] Fixes typo
---
celery/apps/beat.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index a3c278c86..97fa4829a 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -10,7 +10,7 @@
and so on.
"""
-from __future__ import absolute_import, print_statement, unicode_literals
+from __future__ import absolute_import, print_function, unicode_literals
import numbers
import socket
From 3ff387099dac98bc78d20f79b168ecc03f78327a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 21:46:12 +0100
Subject: [PATCH 0142/1103] Remove use of kwdict workaround fixed in py2.7
---
celery/canvas.py | 12 ++++++------
celery/utils/__init__.py | 2 +-
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index f419cabb3..f7ba6d1cd 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -18,7 +18,7 @@
from operator import itemgetter
from itertools import chain as _chain
-from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
+from kombu.utils import cached_property, fxrange, reprcall, uuid
from celery._state import current_app, get_current_worker_task
from celery.utils.functional import (
@@ -133,7 +133,7 @@ def register_type(cls, subclass, name=None):
def from_dict(self, d, app=None):
typ = d.get('subtask_type')
if typ:
- return self.TYPES[typ].from_dict(kwdict(d), app=app)
+ return self.TYPES[typ].from_dict(d, app=app)
return Signature(d, app=app)
def __init__(self, task=None, args=None, kwargs=None, options=None,
@@ -449,7 +449,7 @@ def from_dict(self, d, app=None):
if d['args'] and tasks:
# partial args passed on to first task in chain (Issue #1057).
tasks[0]['args'] = tasks[0]._merge(d['args'])[0]
- return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
+ return chain(*d['kwargs']['tasks'], app=app, **d['options'])
@property
def app(self):
@@ -568,7 +568,7 @@ def from_dict(self, d, app=None):
# partial args passed on to all tasks in the group (Issue #1057).
for task in tasks:
task['args'] = task._merge(d['args'])[0]
- return group(tasks, app=app, **kwdict(d['options']))
+ return group(tasks, app=app, **d['options'])
def _prepared(self, tasks, partial_args, group_id, root_id, dict=dict,
Signature=Signature, from_dict=Signature.from_dict):
@@ -707,8 +707,8 @@ def freeze(self, *args, **kwargs):
@classmethod
def from_dict(self, d, app=None):
- args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
- return self(*args, app=app, **kwdict(d))
+ args, d['kwargs'] = self._unpack_args(**d['kwargs'])
+ return self(*args, app=app, **d)
@staticmethod
def _unpack_args(header=None, body=None, **kwargs):
diff --git a/celery/utils/__init__.py b/celery/utils/__init__.py
index a3264b8d6..2e31c9f68 100644
--- a/celery/utils/__init__.py
+++ b/celery/utils/__init__.py
@@ -386,5 +386,5 @@ def resolve(match):
instantiate, import_from_cwd
)
from .functional import chunks, noop # noqa
-from kombu.utils import cached_property, kwdict, uuid # noqa
+from kombu.utils import cached_property, uuid # noqa
gen_unique_id = uuid
From 6ac362660a5ada7374ef2c8eda8e104b307a386f Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Mon, 19 May 2014 22:04:29 +0100
Subject: [PATCH 0143/1103] Now imports OrderedDict directly from collections
---
celery/bin/multi.py | 3 +--
celery/five.py | 9 ++-------
celery/result.py | 3 +--
celery/utils/functional.py | 2 +-
funtests/stress/stress/suite.py | 4 +---
5 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 2f2055ece..7bb90cf92 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -103,13 +103,12 @@
import socket
import sys
-from collections import defaultdict, namedtuple
+from collections import OrderedDict, defaultdict, namedtuple
from functools import partial
from subprocess import Popen
from time import sleep
from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
from kombu.utils.encoding import from_utf8
from celery import VERSION_BANNER
diff --git a/celery/five.py b/celery/five.py
index bfe9ff0cd..5a272c9d6 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -14,9 +14,8 @@
'zip_longest', 'map', 'string', 'string_t',
'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values',
'nextfun', 'reraise', 'WhateverIO', 'with_metaclass',
- 'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d',
- 'class_property', 'reclassmethod', 'create_module',
- 'recreate_module', 'monotonic']
+ 'THREAD_TIMEOUT_MAX', 'format_d', 'class_property', 'reclassmethod',
+ 'create_module', 'recreate_module', 'monotonic']
import io
@@ -152,10 +151,6 @@ def _clone_with_metaclass(Class):
return _clone_with_metaclass
-############## collections.OrderedDict ######################################
-# was moved to kombu
-from kombu.utils.compat import OrderedDict # noqa
-
############## threading.TIMEOUT_MAX #######################################
try:
from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
diff --git a/celery/result.py b/celery/result.py
index 9bdd7d4f7..cab76bab4 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -11,12 +11,11 @@
import time
import warnings
-from collections import deque
+from collections import OrderedDict, deque
from contextlib import contextmanager
from copy import copy
from kombu.utils import cached_property
-from kombu.utils.compat import OrderedDict
from . import current_app
from . import states
diff --git a/celery/utils/functional.py b/celery/utils/functional.py
index 8903ff08d..be7a2289c 100644
--- a/celery/utils/functional.py
+++ b/celery/utils/functional.py
@@ -11,12 +11,12 @@
import sys
import threading
+from collections import OrderedDict
from functools import wraps
from itertools import islice
from kombu.utils import cached_property
from kombu.utils.functional import lazy, maybe_evaluate, is_list, maybe_list
-from kombu.utils.compat import OrderedDict
from celery.five import UserDict, UserList, items, keys
diff --git a/funtests/stress/stress/suite.py b/funtests/stress/stress/suite.py
index 10d56f4cf..134850409 100755
--- a/funtests/stress/stress/suite.py
+++ b/funtests/stress/stress/suite.py
@@ -6,12 +6,10 @@
import socket
import sys
-from collections import defaultdict, namedtuple
+from collections import OrderedDict, defaultdict, namedtuple
from itertools import count
from time import sleep
-from kombu.utils.compat import OrderedDict
-
from celery import group, VERSION_BANNER
from celery.exceptions import TimeoutError
from celery.five import items, monotonic, range, values
From 43ef0321058f318310cb0abd994b82047a25751e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 15:59:52 +0100
Subject: [PATCH 0144/1103] unicode literals
---
celery/__main__.py | 2 +-
celery/_state.py | 2 +-
celery/app/__init__.py | 2 +-
celery/backends/base.py | 3 +--
celery/backends/cassandra.py | 9 ++++-----
celery/contrib/rdb.py | 2 +-
celery/events/dumper.py | 2 +-
celery/schedules.py | 12 ++++++------
celery/tests/backends/test_redis.py | 7 ++++---
celery/tests/compat_modules/test_compat.py | 6 +++---
celery/tests/utils/test_timeutils.py | 14 --------------
celery/utils/timeutils.py | 5 ++---
docs/internals/app-overview.rst | 20 +-------------------
extra/release/verify_config_reference.py | 5 +++--
funtests/benchmarks/bench_worker.py | 2 +-
funtests/stress/stress/__main__.py | 2 +-
funtests/stress/stress/fbi.py | 2 +-
funtests/suite/test_leak.py | 4 ++--
pavement.py | 2 +-
19 files changed, 35 insertions(+), 68 deletions(-)
diff --git a/celery/__main__.py b/celery/__main__.py
index ba98e0a8a..572f7c3c9 100644
--- a/celery/__main__.py
+++ b/celery/__main__.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import sys
diff --git a/celery/_state.py b/celery/_state.py
index 7592ca242..9ed62b89d 100644
--- a/celery/_state.py
+++ b/celery/_state.py
@@ -9,7 +9,7 @@
This module shouldn't be used directly.
"""
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
diff --git a/celery/app/__init__.py b/celery/app/__init__.py
index 3c690fb41..22a9700ae 100644
--- a/celery/app/__init__.py
+++ b/celery/app/__init__.py
@@ -6,7 +6,7 @@
Celery Application.
"""
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import os
diff --git a/celery/backends/base.py b/celery/backends/base.py
index aec6dd3f4..996171abf 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -33,7 +33,6 @@
from celery.result import (
GroupResult, ResultBase, allow_join_result, result_from_tuple,
)
-from celery.utils import timeutils
from celery.utils.functional import LRUCache
from celery.utils.log import get_logger
from celery.utils.serialization import (
@@ -226,7 +225,7 @@ def prepare_expires(self, value, type=None):
if value is None:
value = self.app.conf.CELERY_TASK_RESULT_EXPIRES
if isinstance(value, timedelta):
- value = timeutils.timedelta_seconds(value)
+ value = value.total_seconds()
if value is not None and type:
return type(value)
return value
diff --git a/celery/backends/cassandra.py b/celery/backends/cassandra.py
index 774e6b792..adb70afc7 100644
--- a/celery/backends/cassandra.py
+++ b/celery/backends/cassandra.py
@@ -22,7 +22,7 @@
from celery.exceptions import ImproperlyConfigured
from celery.five import monotonic
from celery.utils.log import get_logger
-from celery.utils.timeutils import maybe_timedelta, timedelta_seconds
+from celery.utils.timeutils import maybe_timedelta
from .base import BaseBackend
@@ -148,14 +148,13 @@ def _do_store():
'children': self.encode(
self.current_task_children(request),
)}
+ ttl = self.expires and max(self.expires.total_seconds(), 0)
if self.detailed_mode:
meta['result'] = result
- cf.insert(task_id, {date_done: self.encode(meta)},
- ttl=self.expires and timedelta_seconds(self.expires))
+ cf.insert(task_id, {date_done: self.encode(meta)}, ttl=ttl)
else:
meta['result'] = self.encode(result)
- cf.insert(task_id, meta,
- ttl=self.expires and timedelta_seconds(self.expires))
+ cf.insert(task_id, meta, ttl=ttl)
return self._retry_on_error(_do_store)
diff --git a/celery/contrib/rdb.py b/celery/contrib/rdb.py
index 3e9f55bba..8435ec31b 100644
--- a/celery/contrib/rdb.py
+++ b/celery/contrib/rdb.py
@@ -34,7 +34,7 @@ def add(x, y):
base port. The selected port will be logged by the worker.
"""
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import errno
import os
diff --git a/celery/events/dumper.py b/celery/events/dumper.py
index 323afc4e1..3c20186e6 100644
--- a/celery/events/dumper.py
+++ b/celery/events/dumper.py
@@ -7,7 +7,7 @@
as they happen. Think of it like a `tcpdump` for Celery events.
"""
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import sys
diff --git a/celery/schedules.py b/celery/schedules.py
index 18cf48190..be6832151 100644
--- a/celery/schedules.py
+++ b/celery/schedules.py
@@ -21,8 +21,8 @@
from .five import range, string_t
from .utils import is_iterable
from .utils.timeutils import (
- timedelta_seconds, weekday, maybe_timedelta, remaining,
- humanize_seconds, timezone, maybe_make_aware, ffwd
+ weekday, maybe_timedelta, remaining, humanize_seconds,
+ timezone, maybe_make_aware, ffwd
)
from .datastructures import AttributeDict
@@ -116,7 +116,7 @@ def is_due(self, last_run_at):
"""
last_run_at = self.maybe_make_aware(last_run_at)
rem_delta = self.remaining_estimate(last_run_at)
- remaining_s = timedelta_seconds(rem_delta)
+ remaining_s = max(rem_delta.total_seconds(), 0)
if remaining_s == 0:
return schedstate(is_due=True, next=self.seconds)
return schedstate(is_due=False, next=remaining_s)
@@ -142,7 +142,7 @@ def __reduce__(self):
@property
def seconds(self):
- return timedelta_seconds(self.run_every)
+ return max(self.run_every.total_seconds(), 0)
@property
def human_seconds(self):
@@ -562,11 +562,11 @@ def is_due(self, last_run_at):
"""
rem_delta = self.remaining_estimate(last_run_at)
- rem = timedelta_seconds(rem_delta)
+ rem = max(rem_delta.total_seconds(), 0)
due = rem == 0
if due:
rem_delta = self.remaining_estimate(self.now())
- rem = timedelta_seconds(rem_delta)
+ rem = max(rem_delta.total_seconds(), 0)
return schedstate(due, rem)
def __eq__(self, other):
diff --git a/celery/tests/backends/test_redis.py b/celery/tests/backends/test_redis.py
index 0ecc5258b..ad8b50fc7 100644
--- a/celery/tests/backends/test_redis.py
+++ b/celery/tests/backends/test_redis.py
@@ -10,7 +10,6 @@
from celery import uuid
from celery.datastructures import AttributeDict
from celery.exceptions import ImproperlyConfigured
-from celery.utils.timeutils import timedelta_seconds
from celery.tests.case import (
AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch,
@@ -202,8 +201,10 @@ def test_default_is_old_join(self):
def test_expires_is_None(self):
b = self.Backend(expires=None, app=self.app, new_join=True)
- self.assertEqual(b.expires, timedelta_seconds(
- self.app.conf.CELERY_TASK_RESULT_EXPIRES))
+ self.assertEqual(
+ b.expires,
+ self.app.conf.CELERY_TASK_RESULT_EXPIRES.total_seconds(),
+ )
def test_expires_is_timedelta(self):
b = self.Backend(
diff --git a/celery/tests/compat_modules/test_compat.py b/celery/tests/compat_modules/test_compat.py
index 58f0cea0c..aa7be5dd4 100644
--- a/celery/tests/compat_modules/test_compat.py
+++ b/celery/tests/compat_modules/test_compat.py
@@ -10,7 +10,6 @@
periodic_task,
PeriodicTask
)
-from celery.utils.timeutils import timedelta_seconds
from celery.tests.case import AppCase, depends_on_current_app
@@ -51,8 +50,9 @@ def test_is_due(self):
self.now() - p.run_every.run_every,
)
self.assertTrue(due)
- self.assertEqual(remaining,
- timedelta_seconds(p.run_every.run_every))
+ self.assertEqual(
+ remaining, p.run_every.run_every.total_seconds(),
+ )
def test_schedule_repr(self):
p = self.my_periodic
diff --git a/celery/tests/utils/test_timeutils.py b/celery/tests/utils/test_timeutils.py
index 2258d064d..f72794017 100644
--- a/celery/tests/utils/test_timeutils.py
+++ b/celery/tests/utils/test_timeutils.py
@@ -10,7 +10,6 @@
humanize_seconds,
maybe_iso8601,
maybe_timedelta,
- timedelta_seconds,
timezone,
rate,
remaining,
@@ -83,19 +82,6 @@ def test_delta_resolution(self):
for delta, shoulda in deltamap:
self.assertEqual(D(dt, delta), shoulda)
- def test_timedelta_seconds(self):
- deltamap = ((timedelta(seconds=1), 1),
- (timedelta(seconds=27), 27),
- (timedelta(minutes=3), 3 * 60),
- (timedelta(hours=4), 4 * 60 * 60),
- (timedelta(days=3), 3 * 86400))
- for delta, seconds in deltamap:
- self.assertEqual(timedelta_seconds(delta), seconds)
-
- def test_timedelta_seconds_returns_0_on_negative_time(self):
- delta = timedelta(days=-2)
- self.assertEqual(timedelta_seconds(delta), 0)
-
def test_humanize_seconds(self):
t = ((4 * 60 * 60 * 24, '4.00 days'),
(1 * 60 * 60 * 24, '1.00 day'),
diff --git a/celery/utils/timeutils.py b/celery/utils/timeutils.py
index bd0b066e6..453be35de 100644
--- a/celery/utils/timeutils.py
+++ b/celery/utils/timeutils.py
@@ -16,7 +16,6 @@
from datetime import date, datetime, timedelta, tzinfo
from kombu.utils import cached_property, reprcall
-from kombu.utils.compat import timedelta_seconds
from pytz import timezone as _timezone, AmbiguousTimeError
@@ -26,7 +25,7 @@
from .iso8601 import parse_iso8601
from .text import pluralize
-__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta', 'timedelta_seconds',
+__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta',
'delta_resolution', 'remaining', 'rate', 'weekday',
'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware',
'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset',
@@ -149,7 +148,7 @@ def delta_resolution(dt, delta):
which will just return the original datetime.
"""
- delta = timedelta_seconds(delta)
+ delta = max(delta.total_seconds(), 0)
resolutions = ((3, lambda x: x / 86400),
(4, lambda x: x / 3600),
diff --git a/docs/internals/app-overview.rst b/docs/internals/app-overview.rst
index 602f33d29..0213ac91a 100644
--- a/docs/internals/app-overview.rst
+++ b/docs/internals/app-overview.rst
@@ -98,29 +98,11 @@ Deprecations
Inferior to the ping remote control command.
Will be removed in Celery 2.3.
-Removed deprecations
-====================
-
-* `celery.utils.timedelta_seconds`
- Use: :func:`celery.utils.timeutils.timedelta_seconds`
-
-* `celery.utils.defaultdict`
- Use: :func:`celery.utils.compat.defaultdict`
-
-* `celery.utils.all`
- Use: :func:`celery.utils.compat.all`
-
-* `celery.task.apply_async`
- Use app.send_task
-
-* `celery.task.tasks`
- Use :data:`celery.registry.tasks`
-
Aliases (Pending deprecation)
=============================
* celery.task.base
- * .Task -> {app.create_task_cls}
+ * .Task -> {app.Task / :class:`celery.app.task.Task`}
* celery.task.sets
* .TaskSet -> {app.TaskSet}
diff --git a/extra/release/verify_config_reference.py b/extra/release/verify_config_reference.py
index c36af7a20..8f4038cc4 100644
--- a/extra/release/verify_config_reference.py
+++ b/extra/release/verify_config_reference.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
from fileinput import input as _input
from sys import exit, stderr
@@ -27,8 +27,9 @@ def is_ignored(setting, option):
def find_undocumented_settings(directive='.. setting:: '):
settings = dict(flatten(NAMESPACES))
all = set(settings)
+ inp = (l.decode('utf-8') for l in _input())
documented = set(
- line.strip()[len(directive):].strip() for line in _input()
+ line.strip()[len(directive):].strip() for line in inp
if line.strip().startswith(directive)
)
return [setting for setting in all ^ documented
diff --git a/funtests/benchmarks/bench_worker.py b/funtests/benchmarks/bench_worker.py
index 8de8a3af6..8663e8d7a 100644
--- a/funtests/benchmarks/bench_worker.py
+++ b/funtests/benchmarks/bench_worker.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import os
import sys
diff --git a/funtests/stress/stress/__main__.py b/funtests/stress/stress/__main__.py
index 6b6f6ceda..f83c8c192 100644
--- a/funtests/stress/stress/__main__.py
+++ b/funtests/stress/stress/__main__.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
from celery.bin.base import Command, Option
diff --git a/funtests/stress/stress/fbi.py b/funtests/stress/stress/fbi.py
index 9b06af654..f0b40fdcf 100644
--- a/funtests/stress/stress/fbi.py
+++ b/funtests/stress/stress/fbi.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, print_function
+from __future__ import absolute_import, print_function, unicode_literals
import socket
import sys
diff --git a/funtests/suite/test_leak.py b/funtests/suite/test_leak.py
index bd23c45c1..b19c23f41 100644
--- a/funtests/suite/test_leak.py
+++ b/funtests/suite/test_leak.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import gc
import os
@@ -15,7 +15,7 @@
import suite # noqa
-GET_RSIZE = '/bin/ps -p {pid} -o rss='
+GET_RSIZE = b'/bin/ps -p {pid} -o rss='
class Sizes(list):
diff --git a/pavement.py b/pavement.py
index 7b077cc27..dd7916acd 100644
--- a/pavement.py
+++ b/pavement.py
@@ -1,4 +1,4 @@
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
import sys
import traceback
From 04ddf8ea88a39448185effbf01ba186f88d27dc2 Mon Sep 17 00:00:00 2001
From: Roger Hu
Date: Thu, 8 May 2014 01:33:47 +0000
Subject: [PATCH 0145/1103] Be more selective about how file descriptors get
removed from Kombu's hub.
Given that file descriptor changes appear to be triggered on the Kombu side, these changes
may not have a material impact. However, for consistency with the changes
introduced in https://github.com/celery/kombu/pull/353, the same approach has been
applied here.
This change would also make it easier to refactor remove_reader()/remove_writer() to
be smarter about how file descriptors are managed in the future (e.g. using a counter
instead of remove() calls, to avoid possible race conditions when file descriptors are reused).
---
celery/concurrency/asynpool.py | 27 ++++++++++++++-------------
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index c2dbb0241..5471359c2 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -241,21 +241,21 @@ def _make_process_result(self, hub):
fileno_to_outq = self.fileno_to_outq
on_state_change = self.on_state_change
add_reader = hub.add_reader
- hub_remove = hub.remove
+ remove_reader = hub.remove_reader
recv_message = self._recv_message
def on_result_readable(fileno):
try:
fileno_to_outq[fileno]
except KeyError: # process gone
- return hub_remove(fileno)
+ return remove_reader(fileno)
it = recv_message(add_reader, fileno, on_state_change)
try:
next(it)
except StopIteration:
pass
except (IOError, OSError, EOFError):
- hub_remove(fileno)
+ remove_reader(fileno)
else:
add_reader(fileno, it)
return on_result_readable
@@ -477,7 +477,7 @@ def on_job_ready(self, job, i, obj, inqW_fd):
def _create_process_handlers(self, hub, READ=READ, ERR=ERR):
"""For async pool this will create the handlers called
when a process is up/down and etc."""
- add_reader, hub_remove = hub.add_reader, hub.remove
+ add_reader, remove_reader, remove_writer = hub.add_reader, hub.remove_reader, hub.remove_writer
cache = self._cache
all_inqueues = self._all_inqueues
fileno_to_inq = self._fileno_to_inq
@@ -528,7 +528,7 @@ def on_process_up(proc):
self.on_process_up = on_process_up
- def _remove_from_index(obj, proc, index, callback=None):
+ def _remove_from_index(obj, proc, index, remove_func, callback=None):
# this remove the file descriptors for a process from
# the indices. we have to make sure we don't overwrite
# another processes fds, as the fds may be reused.
@@ -544,7 +544,7 @@ def _remove_from_index(obj, proc, index, callback=None):
except KeyError:
pass
else:
- hub_remove(fd)
+ remove_func(fd)
if callback is not None:
callback(fd)
return fd
@@ -554,23 +554,24 @@ def on_process_down(proc):
if proc.dead:
return
process_flush_queues(proc)
- _remove_from_index(proc.outq._reader, proc, fileno_to_outq)
+ _remove_from_index(proc.outq._reader, proc, fileno_to_outq, remove_func=remove_reader)
if proc.synq:
- _remove_from_index(proc.synq._writer, proc, fileno_to_synq)
+ _remove_from_index(proc.synq._writer, proc, fileno_to_synq, remove_func=remove_writer)
inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq,
+ remove_func=remove_writer,
callback=all_inqueues.discard)
if inq:
busy_workers.discard(inq)
- hub_remove(proc.sentinel)
+ remove_reader(proc.sentinel)
waiting_to_start.discard(proc)
self._active_writes.discard(proc.inqW_fd)
- hub_remove(proc.inqW_fd)
- hub_remove(proc.outqR_fd)
+ remove_writer(proc.inqW_fd)
+ remove_reader(proc.outqR_fd)
if proc.synqR_fd:
- hub_remove(proc.synqR_fd)
+ remove_reader(proc.synqR_fd)
if proc.synqW_fd:
self._active_writes.discard(proc.synqW_fd)
- hub_remove(proc.synqW_fd)
+ remove_reader(proc.synqW_fd)
self.on_process_down = on_process_down
def _create_write_handlers(self, hub,
From 318f039f935453f415a6a99f4e5b401af2286aa5 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 16:01:23 +0100
Subject: [PATCH 0146/1103] Fixes func -> fun (Issue #2032)
---
celery/concurrency/asynpool.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 5471359c2..78e8a2136 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -528,7 +528,7 @@ def on_process_up(proc):
self.on_process_up = on_process_up
- def _remove_from_index(obj, proc, index, remove_func, callback=None):
+ def _remove_from_index(obj, proc, index, remove_fun, callback=None):
# this remove the file descriptors for a process from
# the indices. we have to make sure we don't overwrite
# another processes fds, as the fds may be reused.
@@ -544,7 +544,7 @@ def _remove_from_index(obj, proc, index, remove_func, callback=None):
except KeyError:
pass
else:
- remove_func(fd)
+ remove_fun(fd)
if callback is not None:
callback(fd)
return fd
@@ -554,11 +554,11 @@ def on_process_down(proc):
if proc.dead:
return
process_flush_queues(proc)
- _remove_from_index(proc.outq._reader, proc, fileno_to_outq, remove_func=remove_reader)
+ _remove_from_index(proc.outq._reader, proc, fileno_to_outq, remove_fun=remove_reader)
if proc.synq:
- _remove_from_index(proc.synq._writer, proc, fileno_to_synq, remove_func=remove_writer)
+ _remove_from_index(proc.synq._writer, proc, fileno_to_synq, remove_fun=remove_writer)
inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq,
- remove_func=remove_writer,
+ remove_fun=remove_writer,
callback=all_inqueues.discard)
if inq:
busy_workers.discard(inq)
From fe22f278fb130ee4d97792c27361f4a6a548f98e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 17:01:15 +0100
Subject: [PATCH 0147/1103] Fixes unicode problem with beat after adding
unicode_literals
---
celery/apps/beat.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index 97fa4829a..0d053de91 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -98,10 +98,10 @@ def start_scheduler(self):
scheduler_cls=self.scheduler_cls,
schedule_filename=self.schedule)
- print(str(c.blue('__ ', c.magenta('-'),
- c.blue(' ... __ '), c.magenta('-'),
- c.blue(' _\n'),
- c.reset(self.startup_info(beat)))))
+ print(unicode(c.blue('__ ', c.magenta('-'),
+ c.blue(' ... __ '), c.magenta('-'),
+ c.blue(' _\n'),
+ c.reset(self.startup_info(beat)))))
self.setup_logging()
if self.socket_timeout:
logger.debug('Setting default socket timeout to %r',
From 8187301dfdc1a0bafc29f0818198c722a0dea94d Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 17:03:57 +0100
Subject: [PATCH 0148/1103] Fixes flakes for Issue #2032
---
celery/concurrency/asynpool.py | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 78e8a2136..70b89cef9 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -477,7 +477,9 @@ def on_job_ready(self, job, i, obj, inqW_fd):
def _create_process_handlers(self, hub, READ=READ, ERR=ERR):
"""For async pool this will create the handlers called
when a process is up/down and etc."""
- add_reader, remove_reader, remove_writer = hub.add_reader, hub.remove_reader, hub.remove_writer
+ add_reader, remove_reader, remove_writer = (
+ hub.add_reader, hub.remove_reader, hub.remove_writer,
+ )
cache = self._cache
all_inqueues = self._all_inqueues
fileno_to_inq = self._fileno_to_inq
@@ -554,12 +556,17 @@ def on_process_down(proc):
if proc.dead:
return
process_flush_queues(proc)
- _remove_from_index(proc.outq._reader, proc, fileno_to_outq, remove_fun=remove_reader)
+ _remove_from_index(
+ proc.outq._reader, proc, fileno_to_outq, remove_reader,
+ )
if proc.synq:
- _remove_from_index(proc.synq._writer, proc, fileno_to_synq, remove_fun=remove_writer)
- inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq,
- remove_fun=remove_writer,
- callback=all_inqueues.discard)
+ _remove_from_index(
+ proc.synq._writer, proc, fileno_to_synq, remove_writer,
+ )
+ inq = _remove_from_index(
+ proc.inq._writer, proc, fileno_to_inq, remove_writer,
+ callback=all_inqueues.discard,
+ )
if inq:
busy_workers.discard(inq)
remove_reader(proc.sentinel)
From dc28e8a54d0c654e473f949be650ac53433ed9c5 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 17:05:50 +0100
Subject: [PATCH 0149/1103] Worker now preserves exit code. Closes #2024
---
celery/apps/worker.py | 14 ++++++-----
celery/bin/worker.py | 6 +++--
celery/tests/bin/test_worker.py | 40 +++++++++++++++++-------------
celery/tests/case.py | 9 +++++++
celery/tests/worker/test_loops.py | 11 ++++----
celery/tests/worker/test_state.py | 33 ++++++++++++++++++++++--
celery/tests/worker/test_worker.py | 5 ++--
celery/worker/__init__.py | 19 +++++++++-----
celery/worker/loops.py | 12 ++++++---
celery/worker/state.py | 12 ++++-----
10 files changed, 111 insertions(+), 50 deletions(-)
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index d19071108..521ef5f95 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -30,7 +30,7 @@
)
from celery.five import string, string_t
from celery.loaders.app import AppLoader
-from celery.platforms import check_privileges
+from celery.platforms import EX_FAILURE, EX_OK, check_privileges
from celery.utils import cry, isatty
from celery.utils.imports import qualname
from celery.utils.log import get_logger, in_sighandler, set_in_sighandler
@@ -277,7 +277,7 @@ def set_process_status(self, info):
def _shutdown_handler(worker, sig='TERM', how='Warm',
- exc=WorkerShutdown, callback=None):
+ exc=WorkerShutdown, callback=None, exitcode=EX_OK):
def _handle_request(*args):
with in_sighandler():
@@ -288,9 +288,9 @@ def _handle_request(*args):
safe_say('worker: {0} shutdown (MainProcess)'.format(how))
if active_thread_count() > 1:
setattr(state, {'Warm': 'should_stop',
- 'Cold': 'should_terminate'}[how], True)
+ 'Cold': 'should_terminate'}[how], exitcode)
else:
- raise exc()
+ raise exc(exitcode)
_handle_request.__name__ = str('worker_{0}'.format(how))
platforms.signals[sig] = _handle_request
install_worker_term_handler = partial(
@@ -299,6 +299,7 @@ def _handle_request(*args):
if not is_jython: # pragma: no cover
install_worker_term_hard_handler = partial(
_shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate,
+ exitcode=EX_FAILURE,
)
else: # pragma: no cover
install_worker_term_handler = \
@@ -310,7 +311,8 @@ def on_SIGINT(worker):
install_worker_term_hard_handler(worker, sig='SIGINT')
if not is_jython: # pragma: no cover
install_worker_int_handler = partial(
- _shutdown_handler, sig='SIGINT', callback=on_SIGINT
+ _shutdown_handler, sig='SIGINT', callback=on_SIGINT,
+ exitcode=EX_FAILURE,
)
else: # pragma: no cover
install_worker_int_handler = lambda *a, **kw: None
@@ -332,7 +334,7 @@ def restart_worker_sig_handler(*args):
import atexit
atexit.register(_reload_current_worker)
from celery.worker import state
- state.should_stop = True
+ state.should_stop = EX_OK
platforms.signals[sig] = restart_worker_sig_handler
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index d5592f85f..05b249d69 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -205,12 +205,14 @@ def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel, '|'.join(
l for l in LOG_LEVELS if isinstance(l, string_t))))
- return self.app.Worker(
+ worker = self.app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
logfile=logfile, # node format handled by celery.app.log.setup
pidfile=self.node_format(pidfile, hostname),
state_db=self.node_format(state_db, hostname), **kwargs
- ).start()
+ )
+ worker.start()
+ return worker.exitcode
def with_pool_option(self, argv):
# this command support custom pools
diff --git a/celery/tests/bin/test_worker.py b/celery/tests/bin/test_worker.py
index e4ebf7157..864271172 100644
--- a/celery/tests/bin/test_worker.py
+++ b/celery/tests/bin/test_worker.py
@@ -17,6 +17,7 @@
from celery.exceptions import (
ImproperlyConfigured, WorkerShutdown, WorkerTerminate,
)
+from celery.platforms import EX_FAILURE, EX_OK
from celery.utils.log import ensure_process_aware_logger
from celery.worker import state
@@ -490,8 +491,8 @@ def test_worker_int_handler(self):
worker = self._Worker()
handlers = self.psig(cd.install_worker_int_handler, worker)
next_handlers = {}
- state.should_stop = False
- state.should_terminate = False
+ state.should_stop = None
+ state.should_terminate = None
class Signals(platforms.Signals):
@@ -504,15 +505,17 @@ def __setitem__(self, sig, handler):
try:
handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_stop)
+ self.assertEqual(state.should_stop, EX_FAILURE)
finally:
platforms.signals = p
- state.should_stop = False
+ state.should_stop = None
try:
next_handlers['SIGINT']('SIGINT', object())
self.assertTrue(state.should_terminate)
+ self.assertEqual(state.should_terminate, EX_FAILURE)
finally:
- state.should_terminate = False
+ state.should_terminate = None
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
@@ -543,7 +546,7 @@ def test_worker_int_handler_only_stop_MainProcess(self):
self.assertTrue(state.should_stop)
finally:
process.name = name
- state.should_stop = False
+ state.should_stop = None
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
@@ -554,7 +557,7 @@ def test_worker_int_handler_only_stop_MainProcess(self):
handlers['SIGINT']('SIGINT', object())
finally:
process.name = name
- state.should_stop = False
+ state.should_stop = None
@disable_stdouts
def test_install_HUP_not_supported_handler(self):
@@ -580,14 +583,17 @@ def test_worker_term_hard_handler_only_stop_MainProcess(self):
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
- state.should_terminate = False
+ state.should_terminate = None
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
handlers = self.psig(
cd.install_worker_term_hard_handler, worker)
- with self.assertRaises(WorkerTerminate):
- handlers['SIGQUIT']('SIGQUIT', object())
+ try:
+ with self.assertRaises(WorkerTerminate):
+ handlers['SIGQUIT']('SIGQUIT', object())
+ finally:
+ state.should_terminate = None
finally:
process.name = name
@@ -599,9 +605,9 @@ def test_worker_term_handler_when_threads(self):
handlers = self.psig(cd.install_worker_term_handler, worker)
try:
handlers['SIGTERM']('SIGTERM', object())
- self.assertTrue(state.should_stop)
+ self.assertEqual(state.should_stop, EX_OK)
finally:
- state.should_stop = False
+ state.should_stop = None
@disable_stdouts
def test_worker_term_handler_when_single_thread(self):
@@ -613,7 +619,7 @@ def test_worker_term_handler_when_single_thread(self):
with self.assertRaises(WorkerShutdown):
handlers['SIGTERM']('SIGTERM', object())
finally:
- state.should_stop = False
+ state.should_stop = None
@patch('sys.__stderr__')
@skip_if_pypy
@@ -637,7 +643,7 @@ def test_worker_term_handler_only_stop_MainProcess(self):
worker = self._Worker()
handlers = self.psig(cd.install_worker_term_handler, worker)
handlers['SIGTERM']('SIGTERM', object())
- self.assertTrue(state.should_stop)
+ self.assertEqual(state.should_stop, EX_OK)
with patch('celery.apps.worker.active_thread_count') as c:
c.return_value = 1
worker = self._Worker()
@@ -646,7 +652,7 @@ def test_worker_term_handler_only_stop_MainProcess(self):
handlers['SIGTERM']('SIGTERM', object())
finally:
process.name = name
- state.should_stop = False
+ state.should_stop = None
@disable_stdouts
@patch('celery.platforms.close_open_fds')
@@ -665,14 +671,14 @@ def _execv(*args):
worker = self._Worker()
handlers = self.psig(cd.install_worker_restart_handler, worker)
handlers['SIGHUP']('SIGHUP', object())
- self.assertTrue(state.should_stop)
+ self.assertEqual(state.should_stop, EX_OK)
self.assertTrue(register.called)
callback = register.call_args[0][0]
callback()
self.assertTrue(argv)
finally:
os.execv = execv
- state.should_stop = False
+ state.should_stop = None
@disable_stdouts
def test_worker_term_hard_handler_when_threaded(self):
@@ -684,7 +690,7 @@ def test_worker_term_hard_handler_when_threaded(self):
handlers['SIGQUIT']('SIGQUIT', object())
self.assertTrue(state.should_terminate)
finally:
- state.should_terminate = False
+ state.should_terminate = None
@disable_stdouts
def test_worker_term_hard_handler_when_single_threaded(self):
diff --git a/celery/tests/case.py b/celery/tests/case.py
index 77a2dbc5d..551d0dfbb 100644
--- a/celery/tests/case.py
+++ b/celery/tests/case.py
@@ -464,6 +464,15 @@ def _teardown_app(self):
self._threads_at_setup, list(threading.enumerate()),
)
+ # Make sure no test left the shutdown flags enabled.
+ from celery.worker import state as worker_state
+ # check for EX_OK
+ self.assertIsNot(worker_state.should_stop, False)
+ self.assertIsNot(worker_state.should_terminate, False)
+ # check for other true values
+ self.assertFalse(worker_state.should_stop)
+ self.assertFalse(worker_state.should_terminate)
+
def _get_test_name(self):
return '.'.join([self.__class__.__name__, self._testMethodName])
diff --git a/celery/tests/worker/test_loops.py b/celery/tests/worker/test_loops.py
index fd6c8046f..4030782f4 100644
--- a/celery/tests/worker/test_loops.py
+++ b/celery/tests/worker/test_loops.py
@@ -7,6 +7,7 @@
from celery.bootsteps import CLOSE, RUN
from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate
from celery.five import Empty
+from celery.platforms import EX_FAILURE
from celery.worker import state
from celery.worker.consumer import Consumer
from celery.worker.loops import asynloop, synloop
@@ -179,27 +180,27 @@ def test_should_terminate(self):
with self.assertRaises(WorkerTerminate):
asynloop(*x.args)
finally:
- state.should_terminate = False
+ state.should_terminate = None
def test_should_terminate_hub_close_raises(self):
x = X(self.app)
# XXX why aren't the errors propagated?!?
- state.should_terminate = True
+ state.should_terminate = EX_FAILURE
x.hub.close.side_effect = MemoryError()
try:
with self.assertRaises(WorkerTerminate):
asynloop(*x.args)
finally:
- state.should_terminate = False
+ state.should_terminate = None
def test_should_stop(self):
x = X(self.app)
- state.should_stop = True
+ state.should_stop = 303
try:
with self.assertRaises(WorkerShutdown):
asynloop(*x.args)
finally:
- state.should_stop = False
+ state.should_stop = None
def test_updates_qos(self):
x = X(self.app)
diff --git a/celery/tests/worker/test_state.py b/celery/tests/worker/test_state.py
index ede9a00a1..707fb1fe8 100644
--- a/celery/tests/worker/test_state.py
+++ b/celery/tests/worker/test_state.py
@@ -48,13 +48,42 @@ class MyPersistent(state.Persistent):
class test_maybe_shutdown(AppCase):
def teardown(self):
- state.should_stop = False
- state.should_terminate = False
+ state.should_stop = None
+ state.should_terminate = None
def test_should_stop(self):
state.should_stop = True
with self.assertRaises(WorkerShutdown):
state.maybe_shutdown()
+ state.should_stop = 0
+ with self.assertRaises(WorkerShutdown):
+ state.maybe_shutdown()
+ state.should_stop = False
+ try:
+ state.maybe_shutdown()
+ except SystemExit:
+ raise RuntimeError('should not have exited')
+ state.should_stop = None
+ try:
+ state.maybe_shutdown()
+ except SystemExit:
+ raise RuntimeError('should not have exited')
+
+ state.should_stop = 0
+ try:
+ state.maybe_shutdown()
+ except SystemExit as exc:
+ self.assertEqual(exc.code, 0)
+ else:
+ raise RuntimeError('should have exited')
+
+ state.should_stop = 303
+ try:
+ state.maybe_shutdown()
+ except SystemExit as exc:
+ self.assertEqual(exc.code, 303)
+ else:
+ raise RuntimeError('should have exited')
def test_should_terminate(self):
state.should_terminate = True
diff --git a/celery/tests/worker/test_worker.py b/celery/tests/worker/test_worker.py
index f3a3e1494..5ac5f6a9a 100644
--- a/celery/tests/worker/test_worker.py
+++ b/celery/tests/worker/test_worker.py
@@ -20,6 +20,7 @@
WorkerShutdown, WorkerTerminate, TaskRevokedError, InvalidTaskError,
)
from celery.five import Empty, range, Queue as FastQueue
+from celery.platforms import EX_FAILURE
from celery.utils import uuid
from celery.worker import components
from celery.worker import consumer
@@ -864,7 +865,7 @@ def test_shutdown_no_blueprint(self):
self.worker.blueprint = None
self.worker._shutdown()
- @patch('celery.platforms.create_pidlock')
+ @patch('celery.worker.create_pidlock')
def test_use_pidfile(self, create_pidlock):
create_pidlock.return_value = Mock()
worker = self.create_worker(pidfile='pidfilelockfilepid')
@@ -1112,7 +1113,7 @@ def test_step_raises(self):
step.start.side_effect = TypeError()
worker.stop = Mock()
worker.start()
- worker.stop.assert_called_with()
+ worker.stop.assert_called_with(exitcode=EX_FAILURE)
def test_state(self):
self.assertTrue(self.worker.state)
diff --git a/celery/worker/__init__.py b/celery/worker/__init__.py
index 29a095939..74c981d82 100644
--- a/celery/worker/__init__.py
+++ b/celery/worker/__init__.py
@@ -26,12 +26,12 @@
from celery import bootsteps
from celery.bootsteps import RUN, TERMINATE
from celery import concurrency as _concurrency
-from celery import platforms
from celery import signals
from celery.exceptions import (
ImproperlyConfigured, WorkerTerminate, TaskRevokedError,
)
from celery.five import string_t, values
+from celery.platforms import EX_FAILURE, create_pidlock
from celery.utils import default_nodename, worker_direct
from celery.utils.imports import reload_from_cwd
from celery.utils.log import mlevel, worker_logger as logger
@@ -73,6 +73,9 @@ class WorkController(object):
pool = None
semaphore = None
+ #: contains the exit code if a :exc:`SystemExit` event is handled.
+ exitcode = None
+
class Blueprint(bootsteps.Blueprint):
"""Worker bootstep blueprint."""
name = 'Worker'
@@ -150,7 +153,7 @@ def on_after_init(self, **kwargs):
def on_start(self):
if self.pidfile:
- self.pidlock = platforms.create_pidlock(self.pidfile)
+ self.pidlock = create_pidlock(self.pidfile)
def on_consumer_ready(self, consumer):
pass
@@ -207,9 +210,11 @@ def start(self):
self.terminate()
except Exception as exc:
logger.error('Unrecoverable error: %r', exc, exc_info=True)
- self.stop()
- except (KeyboardInterrupt, SystemExit):
- self.stop()
+ self.stop(exitcode=EX_FAILURE)
+ except SystemExit as exc:
+ self.stop(exitcode=exc.code)
+ except KeyboardInterrupt:
+ self.stop(exitcode=EX_FAILURE)
def register_with_event_loop(self, hub):
self.blueprint.send_all(
@@ -243,8 +248,10 @@ def should_use_eventloop(self):
return (detect_environment() == 'default' and
self._conninfo.is_evented and not self.app.IS_WINDOWS)
- def stop(self, in_sighandler=False):
+ def stop(self, in_sighandler=False, exitcode=None):
"""Graceful shutdown of the worker server."""
+ if exitcode is not None:
+ self.exitcode = exitcode
if self.blueprint.state == RUN:
self.signal_consumer_close()
if not in_sighandler or self.pool.signal_safe:
diff --git a/celery/worker/loops.py b/celery/worker/loops.py
index 406633e00..5faa99e24 100644
--- a/celery/worker/loops.py
+++ b/celery/worker/loops.py
@@ -57,10 +57,14 @@ def asynloop(obj, connection, consumer, blueprint, hub, qos,
try:
while blueprint.state == RUN and obj.connection:
# shutdown if signal handlers told us to.
- if state.should_stop:
- raise WorkerShutdown()
- elif state.should_terminate:
- raise WorkerTerminate()
+ should_stop, should_terminate = (
+ state.should_stop, state.should_terminate,
+ )
+ # False == EX_OK, so must use is not False
+ if should_stop is not None and should_stop is not False:
+ raise WorkerShutdown(should_stop)
+ elif should_terminate is not None and should_terminate is not False:
+ raise WorkerTerminate(should_terminate)
# We only update QoS when there is no more messages to read.
# This groups together qos calls, and makes sure that remote
diff --git a/celery/worker/state.py b/celery/worker/state.py
index 3e1ab95c5..2e2773e56 100644
--- a/celery/worker/state.py
+++ b/celery/worker/state.py
@@ -60,15 +60,15 @@
#: Update global state when a task has been reserved.
task_reserved = reserved_requests.add
-should_stop = False
-should_terminate = False
+should_stop = None
+should_terminate = None
def maybe_shutdown():
- if should_stop:
- raise WorkerShutdown()
- elif should_terminate:
- raise WorkerTerminate()
+ if should_stop is not None and should_stop is not False:
+ raise WorkerShutdown(should_stop)
+ elif should_terminate is not None and should_terminate is not False:
+ raise WorkerTerminate(should_terminate)
def task_accepted(request, _all_total_count=all_total_count):
From e0389ec83d081ac882e7c943bdd10bdc7f7c379a Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 17:32:19 +0100
Subject: [PATCH 0150/1103] Fixes Signature.link* to work when link option is
not a list. Closes #2019
---
celery/canvas.py | 4 +++-
celery/tests/tasks/test_canvas.py | 8 ++++++++
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index f7ba6d1cd..4d8e713af 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -12,7 +12,7 @@
"""
from __future__ import absolute_import
-from collections import deque
+from collections import MutableSequence, deque
from copy import deepcopy
from functools import partial as _partial, reduce
from operator import itemgetter
@@ -244,6 +244,8 @@ def apply_async(self, args=(), kwargs={}, **options):
def append_to_list_option(self, key, value):
items = self.options.setdefault(key, [])
+ if not isinstance(items, MutableSequence):
+ items = self.options[key] = [items]
if value not in items:
items.append(value)
return value
diff --git a/celery/tests/tasks/test_canvas.py b/celery/tests/tasks/test_canvas.py
index 4c071a8a1..25080252b 100644
--- a/celery/tests/tasks/test_canvas.py
+++ b/celery/tests/tasks/test_canvas.py
@@ -59,6 +59,14 @@ def test_getitem_property(self):
self.assertEqual(SIG.options, {'task_id': 'TASK_ID'})
self.assertEqual(SIG.subtask_type, '')
+ def test_link_on_scalar(self):
+ x = Signature('TASK', link=Signature('B'))
+ self.assertTrue(x.options['link'])
+ x.link(Signature('C'))
+ self.assertIsInstance(x.options['link'], list)
+ self.assertIn(Signature('B'), x.options['link'])
+ self.assertIn(Signature('C'), x.options['link'])
+
def test_replace(self):
x = Signature('TASK', ('A'), {})
self.assertTupleEqual(x.replace(args=('B', )).args, ('B', ))
From c68e2b9441f435981ae77c2b6ff12cf1a8281f3a Mon Sep 17 00:00:00 2001
From: Dmitry Malinovsky
Date: Tue, 29 Apr 2014 17:01:13 +0700
Subject: [PATCH 0151/1103] Added myself to contributors
Conflicts:
CONTRIBUTORS.txt
---
CONTRIBUTORS.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index 3484a6712..ac27a0c74 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -160,4 +160,5 @@ Martin Davidsson, 2014/02/08
Chris Clark, 2014/02/20
Matthew Duggan, 2014/04/10
Brian Bouterse, 2014/04/10
+Dmitry Malinovsky, 2014/04/28
Luke Pomfrey, 2014/05/06
From 233aae2f8e6c107b75f4f62191e0b2ef18b6e680 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 17:57:04 +0100
Subject: [PATCH 0152/1103] Programs: amqp command no longer messes up argv
---
celery/bin/amqp.py | 16 +++++++++-------
celery/tests/bin/test_amqp.py | 2 +-
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/celery/bin/amqp.py b/celery/bin/amqp.py
index 4dab1527a..638b5ed7a 100644
--- a/celery/bin/amqp.py
+++ b/celery/bin/amqp.py
@@ -246,32 +246,34 @@ def completenames(self, text, *ignored):
return [cmd for cmd in names
if cmd.partition('.')[2].startswith(text)]
- def dispatch(self, cmd, argline):
+ def dispatch(self, cmd, arglist):
"""Dispatch and execute the command.
Lookup order is: :attr:`builtins` -> :attr:`amqp`.
"""
- arglist = shlex.split(safe_str(argline))
+ if isinstance(arglist, string_t):
+ arglist = shlex.split(safe_str(arglist))
if cmd in self.builtins:
return getattr(self, self.builtins[cmd])(*arglist)
fun, args, formatter = self.get_amqp_api_command(cmd, arglist)
return formatter(fun(*args))
- def parseline(self, line):
+ def parseline(self, parts):
"""Parse input line.
:returns: tuple of three items:
`(command_name, arglist, original_line)`
"""
- parts = line.split()
if parts:
- return parts[0], ' '.join(parts[1:]), line
- return '', '', line
+ return parts[0], parts[1:], ' '.join(parts)
+ return '', '', ''
def onecmd(self, line):
"""Parse line and execute command."""
+ if isinstance(line, string_t):
+ line = shlex.split(safe_str(line))
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
@@ -326,7 +328,7 @@ def connect(self, conn=None):
def run(self):
shell = self.Shell(connect=self.connect, out=self.out)
if self.args:
- return shell.onecmd(' '.join(self.args))
+ return shell.onecmd(self.args)
try:
return shell.cmdloop()
except KeyboardInterrupt:
diff --git a/celery/tests/bin/test_amqp.py b/celery/tests/bin/test_amqp.py
index 8840a9f10..20ab44168 100644
--- a/celery/tests/bin/test_amqp.py
+++ b/celery/tests/bin/test_amqp.py
@@ -124,7 +124,7 @@ def test_note(self):
self.assertNotIn('FOO', self.fh.getvalue())
def test_run(self):
- a = self.create_adm('queue.declare foo')
+ a = self.create_adm('queue.declare', 'foo')
a.run()
self.assertIn('ok', self.fh.getvalue())
From 41c7307da401af8ff921e9526e724350bdf4ad5e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 19:01:54 +0100
Subject: [PATCH 0153/1103] Sanitized result backend URI in banner, reports and
inspect conf.
Depends on celery/kombu@f4ef17236e0085b0d948162cfbaa6d42935e2dca
+ celery/kombu@66419eb780c8392286212c7a73c525277b10c970
Closes #2004
---
celery/app/utils.py | 7 ++++++-
celery/apps/worker.py | 5 ++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/celery/app/utils.py b/celery/app/utils.py
index d017de2a3..f8bd9837f 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -15,6 +15,8 @@
from collections import Mapping
from types import ModuleType
+from kombu.utils.url import maybe_sanitize_url
+
from celery.datastructures import ConfigurationView
from celery.five import items, string_t, values
from celery.platforms import pyimplementation
@@ -177,9 +179,12 @@ def maybe_censor(key, value, mask='*' * 8):
if isinstance(key, string_t):
if HIDDEN_SETTINGS.search(key):
return mask
- if 'BROKER_URL' in key.upper():
+ elif 'BROKER_URL' in key.upper():
from kombu import Connection
return Connection(value).as_uri(mask=mask)
+ elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'):
+ return maybe_sanitize_url(value, mask=mask)
+
return value
return {k: maybe_censor(k, v) for k, v in items(conf)}
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index 521ef5f95..ae0c8d509 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -22,6 +22,7 @@
from billiard import current_process
from kombu.utils.encoding import safe_str
+from kombu.utils.url import maybe_sanitize_url
from celery import VERSION_BANNER, platforms, signals
from celery.app import trace
@@ -227,7 +228,9 @@ def startup_info(self):
hostname=safe_str(self.hostname),
version=VERSION_BANNER,
conninfo=self.app.connection().as_uri(),
- results=self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+ results=maybe_sanitize_url(
+ self.app.conf.CELERY_RESULT_BACKEND or 'disabled',
+ ),
concurrency=concurrency,
platform=safe_str(_platform.platform()),
events=events,
From b6e0aff80bdc7b80f62214aebd4f688b6bb9398b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 19:14:09 +0100
Subject: [PATCH 0154/1103] CI must now depend on dev branches
---
tox.ini | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/tox.ini b/tox.ini
index bde53e19a..304980a15 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,6 +14,7 @@ basepython = python3.4
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
+ -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
@@ -23,6 +24,7 @@ basepython = python3.3
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
+ -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
@@ -32,6 +34,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
+ -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
@@ -41,6 +44,7 @@ basepython = pypy
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
+ -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
From e20ba0df2c5fb78dcb53369121c33d1360203c96 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 19:20:18 +0100
Subject: [PATCH 0155/1103] tox no like no zipballs :cow:
---
tox.ini | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tox.ini b/tox.ini
index 304980a15..a15e36635 100644
--- a/tox.ini
+++ b/tox.ini
@@ -14,9 +14,9 @@ basepython = python3.4
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
- -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
+ pip install -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:3.3]
@@ -24,9 +24,9 @@ basepython = python3.3
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test3.txt
-r{toxinidir}/requirements/test-ci.txt
- -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
+ pip install -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:2.7]
@@ -34,9 +34,9 @@ basepython = python2.7
deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/test-ci.txt
- -r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
+ pip install -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
[testenv:pypy]
@@ -47,4 +47,5 @@ deps = -r{toxinidir}/requirements/default.txt
-r{toxinidir}/requirements/dev.txt
setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
+ pip install -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
From f92131ac2a4eb9c6e5b9278fcd8fe5db3d66cb51 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 20:23:15 +0100
Subject: [PATCH 0156/1103] Tests passing on Python3
---
celery/apps/beat.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index 0d053de91..356a3753d 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -17,6 +17,7 @@
import sys
from celery import VERSION_BANNER, platforms, beat
+from celery.five import text_t
from celery.utils.imports import qualname
from celery.utils.log import LOG_LEVELS, get_logger
from celery.utils.timeutils import humanize_seconds
@@ -98,10 +99,12 @@ def start_scheduler(self):
scheduler_cls=self.scheduler_cls,
schedule_filename=self.schedule)
- print(unicode(c.blue('__ ', c.magenta('-'),
- c.blue(' ... __ '), c.magenta('-'),
- c.blue(' _\n'),
- c.reset(self.startup_info(beat)))))
+ print(text_t(
+ c.blue('__ ', c.magenta('-'),
+ c.blue(' ... __ '), c.magenta('-'),
+ c.blue(' _\n'),
+ c.reset(self.startup_info(beat))),
+ ))
self.setup_logging()
if self.socket_timeout:
logger.debug('Setting default socket timeout to %r',
From 66672c0f17003712ab58705d0b213f47b784b41b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 22:12:36 +0100
Subject: [PATCH 0157/1103] Use kombu.utils.json instead of anyjson :sad:
---
celery/apps/beat.py | 2 +-
celery/bin/celery.py | 7 ++++---
celery/events/__init__.py | 15 ---------------
celery/five.py | 8 --------
celery/loaders/base.py | 4 ++--
celery/task/http.py | 5 +++--
celery/tests/bin/test_celery.py | 3 ++-
celery/tests/compat_modules/test_http.py | 2 +-
celery/tests/compat_modules/test_sets.py | 5 +++--
celery/worker/consumer.py | 2 +-
celery/worker/strategy.py | 2 +-
docs/faq.rst | 15 +--------------
docs/userguide/remote-tasks.rst | 4 ++--
examples/httpexample/views.py | 2 +-
funtests/benchmarks/bench_worker.py | 7 -------
15 files changed, 22 insertions(+), 61 deletions(-)
diff --git a/celery/apps/beat.py b/celery/apps/beat.py
index 356a3753d..3daecd11f 100644
--- a/celery/apps/beat.py
+++ b/celery/apps/beat.py
@@ -99,7 +99,7 @@ def start_scheduler(self):
scheduler_cls=self.scheduler_cls,
schedule_filename=self.schedule)
- print(text_t(
+ print(text_t( # noqa (pyflakes chokes on print)
c.blue('__ ', c.magenta('-'),
c.blue(' ... __ '), c.magenta('-'),
c.blue(' _\n'),
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 3d0cf5d8f..57c243040 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -8,7 +8,6 @@
"""
from __future__ import absolute_import, unicode_literals
-import anyjson
import numbers
import os
import sys
@@ -16,6 +15,8 @@
from functools import partial
from importlib import import_module
+from kombu.utils import json
+
from celery.five import string_t, values
from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE
from celery.utils import term
@@ -162,12 +163,12 @@ def run(self, name, *_, **kw):
# Positional args.
args = kw.get('args') or ()
if isinstance(args, string_t):
- args = anyjson.loads(args)
+ args = json.loads(args)
# Keyword args.
kwargs = kw.get('kwargs') or {}
if isinstance(kwargs, string_t):
- kwargs = anyjson.loads(kwargs)
+ kwargs = json.loads(kwargs)
# Expires can be int/float.
expires = kw.get('expires') or None
diff --git a/celery/events/__init__.py b/celery/events/__init__.py
index a4142f76a..93747cf56 100644
--- a/celery/events/__init__.py
+++ b/celery/events/__init__.py
@@ -13,7 +13,6 @@
import os
import time
import threading
-import warnings
from collections import deque
from contextlib import contextmanager
@@ -36,14 +35,6 @@
_TZGETTER = itemgetter('utcoffset', 'timestamp')
-W_YAJL = """
-anyjson is currently using the yajl library.
-This json implementation is broken, it severely truncates floats
-so timestamps will not work.
-
-Please uninstall yajl or force anyjson to use a different library.
-"""
-
CLIENT_CLOCK_SKEW = -1
@@ -151,12 +142,6 @@ def __init__(self, connection=None, hostname=None, enabled=True,
self.enable()
self.headers = {'hostname': self.hostname}
self.pid = os.getpid()
- self.warn_if_yajl()
-
- def warn_if_yajl(self):
- import anyjson
- if anyjson.implementation.name == 'yajl':
- warnings.warn(UserWarning(W_YAJL))
def __enter__(self):
return self
diff --git a/celery/five.py b/celery/five.py
index 5a272c9d6..4f0f3a4be 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -27,14 +27,6 @@
def Counter(): # noqa
return defaultdict(int)
-try:
- buffer_t = buffer
-except NameError: # pragma: no cover
- # Py3 does not have buffer, but we only need isinstance.
-
- class buffer_t(object): # noqa
- pass
-
############## py3k #########################################################
import sys
PY3 = sys.version_info[0] == 3
diff --git a/celery/loaders/base.py b/celery/loaders/base.py
index d73547aad..533530b1d 100644
--- a/celery/loaders/base.py
+++ b/celery/loaders/base.py
@@ -8,7 +8,6 @@
"""
from __future__ import absolute_import
-import anyjson
import imp as _imp
import importlib
import os
@@ -17,6 +16,7 @@
from datetime import datetime
+from kombu.utils import json
from kombu.utils import cached_property
from kombu.utils.encoding import safe_str
@@ -178,7 +178,7 @@ def find_module(self, module):
def cmdline_config_parser(
self, args, namespace='celery',
re_type=re.compile(r'\((\w+)\)'),
- extra_types={'json': anyjson.loads},
+ extra_types={'json': json.loads},
override_types={'tuple': 'json',
'list': 'json',
'dict': 'json'}):
diff --git a/celery/task/http.py b/celery/task/http.py
index 62b89b896..d3739b8ef 100644
--- a/celery/task/http.py
+++ b/celery/task/http.py
@@ -8,7 +8,6 @@
"""
from __future__ import absolute_import
-import anyjson
import sys
try:
@@ -17,6 +16,8 @@
from urllib import urlencode # noqa
from urlparse import urlparse, parse_qsl # noqa
+from kombu.utils import json
+
from celery import shared_task, __version__ as celery_version
from celery.five import items, reraise
from celery.utils.log import get_task_logger
@@ -62,7 +63,7 @@ class UnknownStatusError(InvalidResponseError):
"""The remote server gave an unknown status."""
-def extract_response(raw_response, loads=anyjson.loads):
+def extract_response(raw_response, loads=json.loads):
"""Extract the response text from a raw JSON response."""
if not raw_response:
raise InvalidResponseError('Empty response')
diff --git a/celery/tests/bin/test_celery.py b/celery/tests/bin/test_celery.py
index fbfdb62f6..573810eec 100644
--- a/celery/tests/bin/test_celery.py
+++ b/celery/tests/bin/test_celery.py
@@ -2,9 +2,10 @@
import sys
-from anyjson import dumps
from datetime import datetime
+from kombu.utils.json import dumps
+
from celery import __main__
from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK
from celery.bin.base import Error
diff --git a/celery/tests/compat_modules/test_http.py b/celery/tests/compat_modules/test_http.py
index 08505f87e..c3a23b613 100644
--- a/celery/tests/compat_modules/test_http.py
+++ b/celery/tests/compat_modules/test_http.py
@@ -8,8 +8,8 @@
except ImportError: # py3k
from urllib.request import addinfourl # noqa
-from anyjson import dumps
from kombu.utils.encoding import from_utf8
+from kombu.utils.json import dumps
from celery.five import WhateverIO, items
from celery.task import http
diff --git a/celery/tests/compat_modules/test_sets.py b/celery/tests/compat_modules/test_sets.py
index c1d2c16fa..dc38d19a5 100644
--- a/celery/tests/compat_modules/test_sets.py
+++ b/celery/tests/compat_modules/test_sets.py
@@ -1,8 +1,9 @@
from __future__ import absolute_import
-import anyjson
import warnings
+from kombu.utils import json
+
from celery import uuid
from celery.result import TaskSetResult
from celery.task import Task
@@ -136,7 +137,7 @@ def test_is_JSON_serializable(self):
)
s.args = list(s.args) # tuples are not preserved
# but this doesn't matter.
- self.assertEqual(s, self.subtask(anyjson.loads(anyjson.dumps(s))))
+ self.assertEqual(s, self.subtask(json.loads(json.dumps(s))))
def test_repr(self):
s = self.MockTask.subtask((2, ), {'cache': True})
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 71cf7c635..84bc8d9cb 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -26,6 +26,7 @@
from billiard.exceptions import RestartFreqExceeded
from kombu.async.semaphore import DummyLock
from kombu.common import QoS, ignore_errors
+from kombu.five import buffer_t, items, values
from kombu.syn import _detect_environment
from kombu.utils.compat import get_errno
from kombu.utils.encoding import safe_repr, bytes_t
@@ -35,7 +36,6 @@
from celery.app.trace import build_tracer
from celery.canvas import signature
from celery.exceptions import InvalidTaskError
-from celery.five import buffer_t, items, values
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.utils.text import truncate
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index a4ed1cac2..801e58c3f 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -11,9 +11,9 @@
import logging
from kombu.async.timer import to_timestamp
+from kombu.five import buffer_t
from celery.exceptions import InvalidTaskError
-from celery.five import buffer_t
from celery.utils.log import get_logger
from celery.utils.timeutils import timezone
diff --git a/docs/faq.rst b/docs/faq.rst
index d1cfc0ddf..875798b6c 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -129,22 +129,9 @@ broker this is a natural dependency.
.. _`amqp`: http://pypi.python.org/pypi/amqp
-- `anyjson`_
-
-anyjson is an utility library to select the best possible
-JSON implementation.
-
-.. _`anyjson`: http://pypi.python.org/pypi/anyjson
-
-
.. note::
- For compatibility reasons additional packages may be installed
- if you are running on older Python versions,
- for example Python 2.6 depends on the ``importlib``,
- and ``ordereddict`` libraries.
-
- Also, to handle the dependencies for popular configuration
+ To handle the dependencies for popular configuration
choices Celery defines a number of "bundle" packages,
see :ref:`bundles`.
diff --git a/docs/userguide/remote-tasks.rst b/docs/userguide/remote-tasks.rst
index e5f4aa8c7..f9cfa76fb 100644
--- a/docs/userguide/remote-tasks.rst
+++ b/docs/userguide/remote-tasks.rst
@@ -59,7 +59,7 @@ With this information you could define a simple task in Django:
.. code-block:: python
from django.http import HttpResponse
- from anyjson import serialize
+ from json import dumps
def multiply(request):
@@ -67,7 +67,7 @@ With this information you could define a simple task in Django:
y = int(request.GET['y'])
result = x * y
response = {'status': 'success', 'retval': result}
- return HttpResponse(serialize(response), mimetype='application/json')
+ return HttpResponse(dumps(response), mimetype='application/json')
.. _webhook-rails-example:
diff --git a/examples/httpexample/views.py b/examples/httpexample/views.py
index 5069255e6..e1f4bf0f5 100644
--- a/examples/httpexample/views.py
+++ b/examples/httpexample/views.py
@@ -1,6 +1,6 @@
from django.http import HttpResponse
-from anyjson import dumps
+from json import dumps
def multiply(request):
diff --git a/funtests/benchmarks/bench_worker.py b/funtests/benchmarks/bench_worker.py
index 8663e8d7a..87f3615f0 100644
--- a/funtests/benchmarks/bench_worker.py
+++ b/funtests/benchmarks/bench_worker.py
@@ -9,13 +9,6 @@
USE_FAST_LOCALS='yes',
)
-import anyjson
-JSONIMP = os.environ.get('JSONIMP')
-if JSONIMP:
- anyjson.force_implementation(JSONIMP)
-
-print('anyjson implementation: {0!r}'.format(anyjson.implementation.name))
-
from celery import Celery, group
from celery.five import range
From 65a5d2866eb734c563105fd699d98c61f5a8bbc7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 22:13:52 +0100
Subject: [PATCH 0158/1103] Adds Signature.__json__
---
celery/canvas.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 4d8e713af..e664d5394 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -286,7 +286,10 @@ def __invert__(self):
def __reduce__(self):
# for serialization, the task type is lazily loaded,
# and not stored in the dict itself.
- return subtask, (dict(self), )
+ return signature, (dict(self), )
+
+ def __json__(self):
+ return dict(self)
def reprcall(self, *args, **kwargs):
args, kwargs, _ = self._merge(args, kwargs, {})
From bd8a989a46971ae94446e173e677f971320edbe8 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Tue, 20 May 2014 22:51:42 +0100
Subject: [PATCH 0159/1103] celery.five now extends amqp.five
---
celery/five.py | 172 ++-----------------------------------------------
1 file changed, 7 insertions(+), 165 deletions(-)
diff --git a/celery/five.py b/celery/five.py
index 4f0f3a4be..94a4ab8ca 100644
--- a/celery/five.py
+++ b/celery/five.py
@@ -10,159 +10,15 @@
"""
from __future__ import absolute_import
-__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty',
- 'zip_longest', 'map', 'string', 'string_t',
- 'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values',
- 'nextfun', 'reraise', 'WhateverIO', 'with_metaclass',
- 'THREAD_TIMEOUT_MAX', 'format_d', 'class_property', 'reclassmethod',
- 'create_module', 'recreate_module', 'monotonic']
+__all__ = [
+ 'class_property', 'reclassmethod', 'create_module', 'recreate_module',
+]
-import io
-
-try:
- from collections import Counter
-except ImportError: # pragma: no cover
- from collections import defaultdict
-
- def Counter(): # noqa
- return defaultdict(int)
-
-############## py3k #########################################################
-import sys
-PY3 = sys.version_info[0] == 3
-
-try:
- reload = reload # noqa
-except NameError: # pragma: no cover
- from imp import reload # noqa
-
-try:
- from UserList import UserList # noqa
-except ImportError: # pragma: no cover
- from collections import UserList # noqa
-
-try:
- from UserDict import UserDict # noqa
-except ImportError: # pragma: no cover
- from collections import UserDict # noqa
-
-
-from kombu.five import monotonic
-
-if PY3: # pragma: no cover
- import builtins
-
- from queue import Queue, Empty
- from itertools import zip_longest
-
- map = map
- string = str
- string_t = str
- long_t = int
- text_t = str
- range = range
- int_types = (int, )
- _byte_t = bytes
-
- open_fqdn = 'builtins.open'
-
- def items(d):
- return d.items()
-
- def keys(d):
- return d.keys()
-
- def values(d):
- return d.values()
-
- def nextfun(it):
- return it.__next__
-
- exec_ = getattr(builtins, 'exec')
-
- def reraise(tp, value, tb=None):
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- import __builtin__ as builtins # noqa
- from Queue import Queue, Empty # noqa
- from itertools import imap as map, izip_longest as zip_longest # noqa
- string = unicode # noqa
- string_t = basestring # noqa
- text_t = unicode # noqa
- long_t = long # noqa
- range = xrange # noqa
- int_types = (int, long) # noqa
- _byte_t = (str, bytes) # noqa
-
- open_fqdn = '__builtin__.open'
-
- def items(d): # noqa
- return d.iteritems()
-
- def keys(d): # noqa
- return d.iterkeys()
-
- def values(d): # noqa
- return d.itervalues()
-
- def nextfun(it): # noqa
- return it.next
-
- def exec_(code, globs=None, locs=None): # pragma: no cover
- """Execute code in a namespace."""
- if globs is None:
- frame = sys._getframe(1)
- globs = frame.f_globals
- if locs is None:
- locs = frame.f_locals
- del frame
- elif locs is None:
- locs = globs
- exec("""exec code in globs, locs""")
-
- exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
-
-
-def with_metaclass(Type, skip_attrs={'__dict__', '__weakref__'}):
- """Class decorator to set metaclass.
-
- Works with both Python 2 and Python 3 and it does not add
- an extra class in the lookup order like ``six.with_metaclass`` does
- (that is -- it copies the original class instead of using inheritance).
-
- """
-
- def _clone_with_metaclass(Class):
- attrs = {key: value for key, value in items(vars(Class))
- if key not in skip_attrs}
- return Type(Class.__name__, Class.__bases__, attrs)
-
- return _clone_with_metaclass
-
-
-############## threading.TIMEOUT_MAX #######################################
-try:
- from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
-except ImportError:
- THREAD_TIMEOUT_MAX = 1e10 # noqa
-
-############## format(int, ',d') ##########################
-
-if sys.version_info >= (2, 7): # pragma: no cover
- def format_d(i):
- return format(i, ',d')
-else: # pragma: no cover
- def format_d(i): # noqa
- s = '%d' % i
- groups = []
- while s and s[-1].isdigit():
- groups.append(s[-3:])
- s = s[:-3]
- return s + ','.join(reversed(groups))
+# extends amqp.five
+from amqp.five import * # noqa
+from amqp.five import __all__ as _all_five
+__all__ += _all_five
############## Module Generation ##########################
@@ -209,7 +65,6 @@ def _compat_periodic_task_decorator(*args, **kwargs):
from celery.task import periodic_task
return periodic_task(*args, **kwargs)
-
COMPAT_MODULES = {
'celery': {
'execute': {
@@ -368,16 +223,3 @@ def get_origins(defs):
for module, attrs in items(defs):
origins.update({attr: module for attr in attrs})
return origins
-
-
-_SIO_write = io.StringIO.write
-_SIO_init = io.StringIO.__init__
-
-
-class WhateverIO(io.StringIO):
-
- def __init__(self, v=None, *a, **kw):
- _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw)
-
- def write(self, data):
- _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data)
From f90fce49964db2f6b93111ced6fff8296e454490 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 13:03:15 +0100
Subject: [PATCH 0160/1103] kombu.utils.get_errno is now removed as it is not
needed on Py2.7
---
celery/concurrency/asynpool.py | 33 +++++++++++++++------------------
celery/platforms.py | 3 +--
celery/worker/consumer.py | 3 +--
3 files changed, 17 insertions(+), 22 deletions(-)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 70b89cef9..5dd6ac815 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -41,7 +41,6 @@
from kombu.async import READ, WRITE, ERR
from kombu.serialization import pickle as _pickle
from kombu.utils import fxrange
-from kombu.utils.compat import get_errno
from kombu.utils.eventio import SELECT_BAD_FD
from celery.five import Counter, items, values
from celery.utils.log import get_logger
@@ -139,14 +138,14 @@ def _select(readers=None, writers=None, err=None, timeout=0):
r = list(set(r) | set(e))
return r, w, 0
except (select.error, socket.error) as exc:
- if get_errno(exc) == errno.EINTR:
+ if exc.errno == errno.EINTR:
return [], [], 1
- elif get_errno(exc) in SELECT_BAD_FD:
+ elif exc.errno in SELECT_BAD_FD:
for fd in readers | writers | err:
try:
select.select([fd], [], [], 0)
except (select.error, socket.error) as exc:
- if get_errno(exc) not in SELECT_BAD_FD:
+ if exc.errno not in SELECT_BAD_FD:
raise
readers.discard(fd)
writers.discard(fd)
@@ -196,7 +195,7 @@ def _recv_message(self, add_reader, fd, callback,
fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr,
)
except OSError as exc:
- if get_errno(exc) not in UNAVAIL:
+ if exc.errno not in UNAVAIL:
raise
yield
else:
@@ -218,7 +217,7 @@ def _recv_message(self, add_reader, fd, callback,
fd, bufv[Br:] if readcanbuf else bufv, body_size - Br,
)
except OSError as exc:
- if get_errno(exc) not in UNAVAIL:
+ if exc.errno not in UNAVAIL:
raise
yield
else:
@@ -722,7 +721,7 @@ def schedule_writes(ready_fds, shuffle=random.shuffle):
except StopIteration:
pass
except OSError as exc:
- if get_errno(exc) != errno.EBADF:
+ if exc.errno != errno.EBADF:
raise
else:
add_writer(ready_fd, cor)
@@ -765,7 +764,7 @@ def _write_job(proc, fd, job):
try:
Hw += send(header, Hw)
except Exception as exc:
- if get_errno(exc) not in UNAVAIL:
+ if getattr(exc, 'errno', None) not in UNAVAIL:
raise
# suspend until more data
errors += 1
@@ -781,7 +780,7 @@ def _write_job(proc, fd, job):
try:
Bw += send(body, Bw)
except Exception as exc:
- if get_errno(exc) not in UNAVAIL:
+ if getattr(exc, 'errno', None) not in UNAVAIL:
raise
# suspend until more data
errors += 1
@@ -830,7 +829,7 @@ def _write_ack(fd, ack, callback=None):
try:
Hw += send(header, Hw)
except Exception as exc:
- if get_errno(exc) not in UNAVAIL:
+ if getattr(exc, 'errno', None) not in UNAVAIL:
raise
yield
@@ -839,7 +838,7 @@ def _write_ack(fd, ack, callback=None):
try:
Bw += send(body, Bw)
except Exception as exc:
- if get_errno(exc) not in UNAVAIL:
+ if getattr(exc, 'errno', None) not in UNAVAIL:
raise
# suspend until more data
yield
@@ -1041,7 +1040,7 @@ def _stop_task_handler(task_handler):
try:
proc.inq.put(None)
except OSError as exc:
- if get_errno(exc) != errno.EBADF:
+ if exc.errno != errno.EBADF:
raise
def create_result_handler(self):
@@ -1092,14 +1091,12 @@ def process_flush_queues(self, proc):
try:
task = resq.recv()
except (OSError, IOError, EOFError) as exc:
- if get_errno(exc) == errno.EINTR:
+ _errno = getattr(exc, 'errno', None)
+ if _errno == errno.EINTR:
continue
- elif get_errno(exc) == errno.EAGAIN:
+ elif _errno == errno.EAGAIN:
break
- else:
- debug('got %r while flushing process %r',
- exc, proc, exc_info=1)
- if get_errno(exc) not in UNAVAIL:
+ elif _errno not in UNAVAIL:
debug('got %r while flushing process %r',
exc, proc, exc_info=1)
break
diff --git a/celery/platforms.py b/celery/platforms.py
index 651b8f5fa..0ddc3d6ae 100644
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -24,7 +24,6 @@
from billiard import current_process
# fileno used to be in this module
from kombu.utils import maybe_fileno
-from kombu.utils.compat import get_errno
from kombu.utils.encoding import safe_str
from contextlib import contextmanager
@@ -531,7 +530,7 @@ def maybe_drop_privileges(uid=None, gid=None):
try:
setuid(0)
except OSError as exc:
- if get_errno(exc) != errno.EPERM:
+ if exc.errno != errno.EPERM:
raise
pass # Good: cannot restore privileges.
else:
diff --git a/celery/worker/consumer.py b/celery/worker/consumer.py
index 84bc8d9cb..6a3a56379 100644
--- a/celery/worker/consumer.py
+++ b/celery/worker/consumer.py
@@ -28,7 +28,6 @@
from kombu.common import QoS, ignore_errors
from kombu.five import buffer_t, items, values
from kombu.syn import _detect_environment
-from kombu.utils.compat import get_errno
from kombu.utils.encoding import safe_repr, bytes_t
from kombu.utils.limits import TokenBucket
@@ -271,7 +270,7 @@ def start(self):
try:
blueprint.start(self)
except self.connection_errors as exc:
- if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE:
+ if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
raise # Too many open files
maybe_shutdown()
try:
From 4b40a7266b6e7ecd2cdc2b3230d189c331bb5d2f Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 14:10:29 +0100
Subject: [PATCH 0161/1103] Fixes typo Publisher -> Producer
---
celery/tests/app/test_amqp.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/tests/app/test_amqp.py b/celery/tests/app/test_amqp.py
index 9ef9f572e..e4e8873a2 100644
--- a/celery/tests/app/test_amqp.py
+++ b/celery/tests/app/test_amqp.py
@@ -22,7 +22,7 @@ def test_accept_content(self):
)
-class test_PublisherPool(AppCase):
+class test_ProducerPool(AppCase):
def test_setup_nolimit(self):
self.app.conf.BROKER_POOL_LIMIT = None
From 91af06d50bbfb23ccd74ca24f2402a7b70e2847e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 14:11:43 +0100
Subject: [PATCH 0162/1103] Beat: Scheduler.Publisher renamed to .Producer, and
.publisher to .producer
---
celery/beat.py | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/celery/beat.py b/celery/beat.py
index 64ef932e7..5623dbf5f 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -174,13 +174,13 @@ class Scheduler(object):
logger = logger # compat
def __init__(self, app, schedule=None, max_interval=None,
- Publisher=None, lazy=False, sync_every_tasks=None, **kwargs):
+ Producer=None, lazy=False, sync_every_tasks=None, **kwargs):
self.app = app
self.data = maybe_evaluate({} if schedule is None else schedule)
self.max_interval = (max_interval
or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
or self.max_interval)
- self.Publisher = Publisher or app.amqp.TaskProducer
+ self.Producer = Producer or app.amqp.TaskProducer
self._heap = None
self.sync_every_tasks = (
app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
@@ -199,11 +199,11 @@ def install_default_entries(self, data):
'options': {'expires': 12 * 3600}}
self.update_from_dict(entries)
- def maybe_due(self, entry, publisher=None):
+ def maybe_due(self, entry, producer=None):
is_due, next_time_to_run = entry.is_due()
if is_due:
- self.apply_entry(entry, producer=publisher, advance=True)
+ self.apply_entry(entry, producer=producer, advance=True)
return next_time_to_run
def apply_entry(self, entry, producer=None):
@@ -234,7 +234,7 @@ def tick(self, event_t=event_t, min=min,
verify = heappop(H)
if verify is event:
next_entry = self.reserve(entry)
- self.apply_entry(entry, producer=self.publisher)
+ self.apply_entry(entry, producer=self.producer)
heappush(H, event_t(next_time_to_run, event[1], next_entry))
return 0
else:
@@ -355,8 +355,8 @@ def connection(self):
return self.app.connection()
@cached_property
- def publisher(self):
- return self.Publisher(self._ensure_connected())
+ def producer(self):
+ return self.Producer(self._ensure_connected())
@property
def info(self):
From 494f1d6e70dd708ab30c307f111b810bcbab886f Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 14:25:02 +0100
Subject: [PATCH 0163/1103] Programs: multi must pass through %i and %I logfile
formats
---
celery/bin/multi.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 7bb90cf92..911366e53 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -508,7 +508,7 @@ def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''):
expand = partial(
node_format, nodename=nodename, N=shortname, d=hostname,
- h=nodename,
+ h=nodename, i='%i', I='%I',
)
argv = ([expand(cmd)] +
[format_opt(opt, expand(value))
From e99172d82ff65b69a1d48d3e27090c6bd7e1d4c7 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 14:31:18 +0100
Subject: [PATCH 0164/1103] Default log format now includes %I to avoid race
conditions (one logfile per child process)
---
celery/bin/multi.py | 6 +++---
docs/tutorials/daemonizing.rst | 19 ++++++++++++-------
extra/centos/celeryd | 4 ++--
extra/centos/celeryd.sysconfig | 6 ++++--
extra/generic-init.d/celeryd | 2 +-
extra/systemd/celery.conf | 2 +-
6 files changed, 23 insertions(+), 16 deletions(-)
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 911366e53..e61b19e7c 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -16,13 +16,13 @@
# this. The abbreviation %n will be expanded to the current
# node name.
$ celery multi start Leslie -E --pidfile=/var/run/celery/%n.pid
- --logfile=/var/log/celery/%n.log
+ --logfile=/var/log/celery/%n%I.log
# You need to add the same arguments when you restart,
# as these are not persisted anywhere.
$ celery multi restart Leslie -E --pidfile=/var/run/celery/%n.pid
- --logfile=/var/run/celery/%n.log
+ --logfile=/var/run/celery/%n%I.log
# To stop the node, you need to specify the same pidfile.
$ celery multi stop Leslie --pidfile=/var/run/celery/%n.pid
@@ -252,7 +252,7 @@ def start(self, argv, cmd):
def with_detacher_default_options(self, p):
_setdefaultopt(p.options, ['--pidfile', '-p'], '%n.pid')
- _setdefaultopt(p.options, ['--logfile', '-f'], '%n.log')
+ _setdefaultopt(p.options, ['--logfile', '-f'], '%n%I.log')
p.options.setdefault(
'--cmd',
'-m {0}'.format(celery_exe('worker', '--detach')),
diff --git a/docs/tutorials/daemonizing.rst b/docs/tutorials/daemonizing.rst
index 6ba461ee0..311ceae85 100644
--- a/docs/tutorials/daemonizing.rst
+++ b/docs/tutorials/daemonizing.rst
@@ -56,7 +56,7 @@ must also export them (e.g. ``export DISPLAY=":0"``)
$ celery multi start worker1 \
--pidfile="$HOME/run/celery/%n.pid" \
- --logfile="$HOME/log/celery/%n.log"
+ --logfile="$HOME/log/celery/%n%I.log"
$ celery multi restart worker1 --pidfile="$HOME/run/celery/%n.pid"
@@ -97,7 +97,7 @@ This is an example configuration for a Python project.
CELERYD_OPTS="--time-limit=300 --concurrency=8"
# %n will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
CELERYD_PID_FILE="/var/run/celery/%n.pid"
# Workers should run as an unprivileged user.
@@ -156,7 +156,9 @@ Available options
Full path to the PID file. Default is /var/run/celery/%n.pid
* CELERYD_LOG_FILE
- Full path to the worker log file. Default is /var/log/celery/%n.log
+ Full path to the worker log file. Default is /var/log/celery/%n%I.log
+ **Note**: Using `%I` is important when using the prefork pool as having
+ multiple processes share the same log file will lead to race conditions.
* CELERYD_LOG_LEVEL
Worker log level. Default is INFO.
@@ -311,8 +313,10 @@ This is an example configuration for a Python project:
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
- # %n will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ # - %n will be replaced with the first part of the nodename.
+ # - %I will be replaced with the current child process index
+ # and is important when using the prefork pool to avoid race conditions.
+ CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
CELERYD_PID_FILE="/var/run/celery/%n.pid"
.. _generic-systemd-celeryd-django-example:
@@ -339,8 +343,9 @@ This is an example configuration for those using `django-celery`:
# Extra command-line arguments to the worker
CELERYD_OPTS="--time-limit=300 --concurrency=8"
- # %n will be replaced with the first part of the nodename.
- CELERYD_LOG_FILE="/var/log/celery/%n.log"
+ # - %n will be replaced with the first part of the nodename.
+ # - %I will be replaced with the current child process index
+ CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
CELERYD_PID_FILE="/var/run/celery/%n.pid"
To add an environment variable such as DJANGO_SETTINGS_MODULE use the
diff --git a/extra/centos/celeryd b/extra/centos/celeryd
index c5e3b555c..1292cc84c 100644
--- a/extra/centos/celeryd
+++ b/extra/centos/celeryd
@@ -29,7 +29,7 @@
# Setting `prog` here allows you to symlink this init script, making it easy
# to run multiple processes on the system.
-# If we're invoked via SysV-style runlevel scripts we need to follow the
+# If we're invoked via SysV-style runlevel scripts we need to follow the
# link from rcX.d before working out the script name.
if [[ `dirname $0` == /etc/rc*.d ]]; then
target="$(readlink $0)"
@@ -48,7 +48,7 @@ source /etc/init.d/functions
SLEEP_SECONDS=5
DEFAULT_PID_FILE="/var/run/celery/$prog-%n.pid"
-DEFAULT_LOG_FILE="/var/log/celery/$prog-%n.log"
+DEFAULT_LOG_FILE="/var/log/celery/$prog-%n%I.log"
DEFAULT_LOG_LEVEL="INFO"
DEFAULT_NODES="celery"
DEFAULT_CELERYD="-m celery.bin.celeryd_detach"
diff --git a/extra/centos/celeryd.sysconfig b/extra/centos/celeryd.sysconfig
index c6f2d54c6..c243b8b57 100644
--- a/extra/centos/celeryd.sysconfig
+++ b/extra/centos/celeryd.sysconfig
@@ -16,8 +16,10 @@
# Create log/pid dirs, if they don't already exist
#CELERY_CREATE_DIRS=1
-# %n will be replaced with the nodename
-#CELERYD_LOG_FILE="/path/to/my_application/log/%n.log"
+# - %n will be replaced with the first part of the nodename.
+# - %I will be replaced with the current child process index
+# and is important when using the prefork pool to avoid race conditions.
+#CELERYD_LOG_FILE="/path/to/my_application/log/%n%I.log"
#CELERYD_PID_FILE="/var/run/celery/%n.pid"
# Workers run as an unprivileged user
diff --git a/extra/generic-init.d/celeryd b/extra/generic-init.d/celeryd
index 0fe704fdf..df918bca2 100755
--- a/extra/generic-init.d/celeryd
+++ b/extra/generic-init.d/celeryd
@@ -48,7 +48,7 @@ SCRIPT_NAME="$(basename "$SCRIPT_FILE")"
DEFAULT_USER="celery"
DEFAULT_PID_FILE="/var/run/celery/%n.pid"
-DEFAULT_LOG_FILE="/var/log/celery/%n.log"
+DEFAULT_LOG_FILE="/var/log/celery/%n%I.log"
DEFAULT_LOG_LEVEL="INFO"
DEFAULT_NODES="celery"
DEFAULT_CELERYD="-m celery worker --detach"
diff --git a/extra/systemd/celery.conf b/extra/systemd/celery.conf
index 08b90cf28..6662d43d5 100644
--- a/extra/systemd/celery.conf
+++ b/extra/systemd/celery.conf
@@ -6,7 +6,7 @@ CELERYD_NODES="worker"
CELERYD_OPTS=""
CELERY_BIN="/usr/bin/python2 -m celery"
CELERYD_PID_FILE="/var/run/celery/%n.pid"
-CELERYD_LOG_FILE="/var/log/celery/%n.log"
+CELERYD_LOG_FILE="/var/log/celery/%n%I.log"
CELERYD_LOG_LEVEL="INFO"
d /run/celery 0755 user users -
From a2deeb3ecd68ea5e9337c83762382508610902d1 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 15:15:00 +0100
Subject: [PATCH 0165/1103] flakes
---
celery/beat.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/beat.py b/celery/beat.py
index 5623dbf5f..3579ecf4f 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -180,7 +180,7 @@ def __init__(self, app, schedule=None, max_interval=None,
self.max_interval = (max_interval
or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL
or self.max_interval)
- self.Producer = Producer or app.amqp.TaskProducer
+ self.Producer = Producer or app.amqp.Producer
self._heap = None
self.sync_every_tasks = (
app.conf.CELERYBEAT_SYNC_EVERY if sync_every_tasks is None
From 2a6065514000be6af9c0501eaf4def93ad343e2e Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 15:17:01 +0100
Subject: [PATCH 0166/1103] [Taskv2] callbacks, errbacks, chord and chain moved
to message body to avoid header limitations.
---
celery/app/amqp.py | 13 ++++---
celery/app/trace.py | 43 ++++++++++++----------
celery/apps/worker.py | 4 +--
celery/concurrency/prefork.py | 2 +-
celery/worker/request.py | 4 +--
docs/internals/protov2.rst | 68 +++++++++++++++--------------------
6 files changed, 66 insertions(+), 68 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 4a4fbc15c..7fc59c43f 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -300,11 +300,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'id': task_id,
'eta': eta,
'expires': expires,
- 'callbacks': callbacks,
- 'errbacks': errbacks,
- 'chain': None, # TODO
'group': group_id,
- 'chord': chord,
'retries': retries,
'timelimit': [time_limit, soft_time_limit],
'root_id': root_id,
@@ -314,7 +310,14 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'correlation_id': task_id,
'reply_to': reply_to or '',
},
- body=(args, kwargs),
+ body=(
+ args, kwargs, {
+ 'callbacks': callbacks,
+ 'errbacks': errbacks,
+ 'chain': None, # TODO
+ 'chord': chord,
+ },
+ ),
sent_event={
'uuid': task_id,
'root': root_id,
diff --git a/celery/app/trace.py b/celery/app/trace.py
index d06ed9f48..c26961cde 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -466,36 +466,40 @@ def _trace_task_ret(name, uuid, request, body, content_type,
content_encoding, loads=loads_message, app=None,
**extra_request):
app = app or current_app._get_current_object()
- accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
- args, kwargs = loads(body, content_type, content_encoding, accept=accept)
- request.update(args=args, kwargs=kwargs, **extra_request)
+ embed = None
+ if content_type:
+ accept = prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT)
+ args, kwargs, embed = loads(
+ body, content_type, content_encoding, accept=accept,
+ )
+ else:
+ args, kwargs = body
+ hostname = socket.gethostname()
+ request.update({
+ 'args': args, 'kwargs': kwargs,
+ 'hostname': hostname, 'is_eager': False,
+ }, **embed or {})
R, I, T, Rstr = trace_task(app.tasks[name],
uuid, args, kwargs, request, app=app)
return (1, R, T) if I else (0, Rstr, T)
trace_task_ret = _trace_task_ret
-def _fast_trace_task_v1(task, uuid, args, kwargs, request={}, _loc=_localized):
- # setup_worker_optimizations will point trace_task_ret to here,
- # so this is the function used in the worker.
- tasks, _ = _loc
- R, I, T, Rstr = tasks[task].__trace__(uuid, args, kwargs, request)[0]
- # exception instance if error, else result text
- return (1, R, T) if I else (0, Rstr, T)
-
-
def _fast_trace_task(task, uuid, request, body, content_type,
content_encoding, loads=loads_message, _loc=_localized,
hostname=None, **_):
- tasks, accept = _loc
+ embed = None
+ tasks, accept, hostname = _loc
if content_type:
- args, kwargs = loads(body, content_type, content_encoding,
- accept=accept)
+ args, kwargs, embed = loads(
+ body, content_type, content_encoding, accept=accept,
+ )
else:
args, kwargs = body
request.update({
- 'args': args, 'kwargs': kwargs, 'hostname': hostname,
- })
+ 'args': args, 'kwargs': kwargs,
+ 'hostname': hostname, 'is_eager': False,
+ }, **embed or {})
R, I, T, Rstr = tasks[task].__trace__(
uuid, args, kwargs, request,
)
@@ -515,9 +519,11 @@ def report_internal_error(task, exc):
del(_tb)
-def setup_worker_optimizations(app):
+def setup_worker_optimizations(app, hostname=None):
global trace_task_ret
+ hostname = hostname or socket.gethostname()
+
# make sure custom Task.__call__ methods that calls super
# will not mess up the request/task stack.
_install_stack_protection()
@@ -538,6 +544,7 @@ def setup_worker_optimizations(app):
_localized[:] = [
app._tasks,
prepare_accept_content(app.conf.CELERY_ACCEPT_CONTENT),
+ hostname,
]
trace_task_ret = _fast_trace_task
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index ae0c8d509..d3c220853 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -112,7 +112,7 @@ def safe_say(msg):
class Worker(WorkController):
def on_before_init(self, **kwargs):
- trace.setup_worker_optimizations(self.app)
+ trace.setup_worker_optimizations(self.app, self.hostname)
# this signal can be used to set up configuration for
# workers by name.
@@ -144,7 +144,7 @@ def on_init_blueprint(self):
self._custom_logging = self.setup_logging()
# apply task execution optimizations
# -- This will finalize the app!
- trace.setup_worker_optimizations(self.app)
+ trace.setup_worker_optimizations(self.app, self.hostname)
def on_start(self):
if not self._custom_logging and self.redirect_stdouts:
diff --git a/celery/concurrency/prefork.py b/celery/concurrency/prefork.py
index b579d0e10..ef4de4bed 100644
--- a/celery/concurrency/prefork.py
+++ b/celery/concurrency/prefork.py
@@ -68,7 +68,7 @@ def process_initializer(app, hostname):
hostname=hostname)
if os.environ.get('FORKED_BY_MULTIPROCESSING'):
# pool did execv after fork
- trace.setup_worker_optimizations(app)
+ trace.setup_worker_optimizations(app, hostname)
else:
app.set_current()
set_default_app(app)
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 41b1d765e..7193a9013 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -179,7 +179,7 @@ def execute_using_pool(self, pool, **kwargs):
result = pool.apply_async(
trace_task_ret,
args=(self.name, task_id, self.request_dict, self.body,
- self.content_type, self.content_encoding, self.hostname),
+ self.content_type, self.content_encoding),
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
@@ -444,7 +444,6 @@ def create_request_cls(base, task, pool, hostname, eventer,
default_soft_time_limit = task.soft_time_limit
apply_async = pool.apply_async
acks_late = task.acks_late
- std_kwargs = {'hostname': hostname, 'is_eager': False}
events = eventer and eventer.enabled
class Request(base):
@@ -461,7 +460,6 @@ def execute_using_pool(self, pool, **kwargs):
trace,
args=(self.name, task_id, self.request_dict, self.body,
self.content_type, self.content_encoding),
- kwargs=std_kwargs,
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
index 01b2e1325..626c333fb 100644
--- a/docs/internals/protov2.rst
+++ b/docs/internals/protov2.rst
@@ -21,7 +21,7 @@ Notes
- Body is only for language specific data.
- - Python stores args/kwargs in body.
+ - Python stores args/kwargs and embedded signatures in body.
- If a message uses raw encoding then the raw data
will be passed as a single argument to the function.
@@ -43,7 +43,7 @@ Notes
when sending the next message::
execute_task(message)
- chain = message.headers['chain']
+ chain = embed['chain']
if chain:
sig = maybe_signature(chain.pop())
sig.apply_async(chain=chain)
@@ -74,16 +74,6 @@ Notes
return fun(*args, **kwargs)
-
-Undecided
----------
-
-- May consider moving callbacks/errbacks/chain into body.
-
- Will huge lists in headers cause overhead?
- The downside of keeping them in the body is that intermediates
- won't be able to introspect these values.
-
Definition
==========
@@ -93,35 +83,40 @@ Definition
# 'class' header existing means protocol is v2
properties = {
- 'correlation_id': (uuid)task_id,
- 'content_type': (string)mime,
- 'content_encoding': (string)encoding,
+ 'correlation_id': uuid task_id,
+ 'content_type': string mimetype,
+ 'content_encoding': string encoding,
# optional
- 'reply_to': (string)queue_or_url,
+ 'reply_to': string queue_or_url,
}
headers = {
- 'lang': (string)'py'
- 'task': (string)task,
- 'id': (uuid)task_id,
- 'root_id': (uuid)root_id,
- 'parent_id': (uuid)parent_id,
+ 'lang': string 'py'
+ 'task': string task,
+ 'id': uuid task_id,
+ 'root_id': uuid root_id,
+ 'parent_id': uuid parent_id,
+ 'group': uuid group_id,
# optional
- 'meth': (string)unused,
- 'shadow': (string)replace_name,
- 'eta': (iso8601)eta,
- 'expires'; (iso8601)expires,
- 'callbacks': (list)Signature,
- 'errbacks': (list)Signature,
- 'chain': (list)Signature, # non-recursive, reversed list of signatures
- 'group': (uuid)group_id,
- 'chord': (uuid)chord_id,
- 'retries': (int)retries,
- 'timelimit': (tuple)(soft, hard),
+ 'meth': string method_name,
+ 'shadow': string alias_name,
+ 'eta': iso8601 eta,
+ 'expires'; iso8601 expires,
+ 'retries': int retries,
+ 'timelimit': (soft, hard),
}
- body = (args, kwargs)
+ body = (
+ object[] args,
+ Mapping kwargs,
+ Mapping embed {
+ 'callbacks': Signature[] callbacks,
+ 'errbacks': Signature[] errbacks,
+ 'chain': Signature[] chain,
+ 'chord': Signature chord_callback,
+ }
+ )
Example
=======
@@ -132,15 +127,10 @@ Example
task_id = uuid()
basic_publish(
- message=json.dumps([[2, 2], {}]),
+ message=json.dumps(([2, 2], {}, None),
application_headers={
'lang': 'py',
'task': 'proj.tasks.add',
- 'chain': [
- # reversed chain list
- {'task': 'proj.tasks.add', 'args': (8, )},
- {'task': 'proj.tasks.add', 'args': (4, )},
- ]
}
properties={
'correlation_id': task_id,
From 24a4a3dc796fc13bd48a55c70229fdbb69290463 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 15:25:34 +0100
Subject: [PATCH 0167/1103] Task protocol v2 document moved into single
protocol document
---
docs/internals/index.rst | 1 -
docs/internals/protocol.rst | 189 ++++++++++++++++++++++++++++++++----
docs/internals/protov2.rst | 140 --------------------------
3 files changed, 169 insertions(+), 161 deletions(-)
delete mode 100644 docs/internals/protov2.rst
diff --git a/docs/internals/index.rst b/docs/internals/index.rst
index d10ed013e..4521a22fa 100644
--- a/docs/internals/index.rst
+++ b/docs/internals/index.rst
@@ -14,6 +14,5 @@
deprecation
worker
protocol
- protov2
app-overview
reference/index
diff --git a/docs/internals/protocol.rst b/docs/internals/protocol.rst
index 6b7360b31..9a12ba2e1 100644
--- a/docs/internals/protocol.rst
+++ b/docs/internals/protocol.rst
@@ -1,16 +1,174 @@
+.. _message-protocol:
+
+===================
+ Message Protocol
+===================
+
+.. contents::
+ :local:
+
+.. _message-protocol-task:
.. _internals-task-message-protocol:
+Task messages
+=============
+
+.. _message-protocol-task-v2:
+
+Version 2
+---------
+
+Definition
+~~~~~~~~~~
+
+.. code-block:: python
+
+ # protocol v2 implies UTC=True
+ # 'class' header existing means protocol is v2
+
+ properties = {
+ 'correlation_id': uuid task_id,
+ 'content_type': string mimetype,
+ 'content_encoding': string encoding,
+
+ # optional
+ 'reply_to': string queue_or_url,
+ }
+ headers = {
+ 'lang': string 'py'
+ 'task': string task,
+ 'id': uuid task_id,
+ 'root_id': uuid root_id,
+ 'parent_id': uuid parent_id,
+ 'group': uuid group_id,
+
+ # optional
+ 'meth': string method_name,
+ 'shadow': string alias_name,
+ 'eta': iso8601 eta,
+ 'expires': iso8601 expires,
+ 'retries': int retries,
+ 'timelimit': (soft, hard),
+ }
+
+ body = (
+ object[] args,
+ Mapping kwargs,
+ Mapping embed {
+ 'callbacks': Signature[] callbacks,
+ 'errbacks': Signature[] errbacks,
+ 'chain': Signature[] chain,
+ 'chord': Signature chord_callback,
+ }
+ )
+
+Example
+~~~~~~~
+
+This example sends a task message using version 2 of the protocol:
+
+.. code-block:: python
+
+ # chain: add(add(add(2, 2), 4), 8) == 2 + 2 + 4 + 8
+
+ task_id = uuid()
+ basic_publish(
+ message=json.dumps(([2, 2], {}, None)),
+ application_headers={
+ 'lang': 'py',
+ 'task': 'proj.tasks.add',
+ }
+ properties={
+ 'correlation_id': task_id,
+ 'content_type': 'application/json',
+ 'content_encoding': 'utf-8',
+ }
+ )
+
+Changes from version 1
+~~~~~~~~~~~~~~~~~~~~~~
+
+- Protocol version detected by the presence of a ``task`` message header.
+
+- Support for multiple languages via the ``lang`` header.
+
+ Worker may redirect the message to a worker that supports
+ the language.
+
+- Metadata moved to headers.
+
+ This means that workers/intermediates can inspect the message
+ and make decisions based on the headers without decoding
+ the payload (which may be language specific, e.g. serialized by the
+ Python specific pickle serializer).
+
+- Body is only for language specific data.
+
+ - Python stores args/kwargs and embedded signatures in body.
+
+ - If a message uses raw encoding then the raw data
+ will be passed as a single argument to the function.
+
+ - Java/C, etc. can use a thrift/protobuf document as the body
+
+- Dispatches to actor based on ``task``, ``meth`` headers
+
+ ``meth`` is unused by python, but may be used in the future
+ to specify class+method pairs.
+
+- Chain gains a dedicated field.
+
+ Reducing the chain into a recursive ``callbacks`` argument
+ causes problems when the recursion limit is exceeded.
+
+ This is fixed in the new message protocol by specifying
+ a list of signatures, each task will then pop a task off the list
+ when sending the next message::
+
+ execute_task(message)
+ chain = embed['chain']
+ if chain:
+ sig = maybe_signature(chain.pop())
+ sig.apply_async(chain=chain)
+
+- ``correlation_id`` replaces ``task_id`` field.
+
+- ``root_id`` and ``parent_id`` fields help keep track of workflows.
+
+- ``shadow`` lets you specify a different name for logs, monitors
+ can be used for e.g. meta tasks that call any function::
+
+ from celery.utils.imports import qualname
+
+ class PickleTask(Task):
+ abstract = True
+
+ def unpack_args(self, fun, args=()):
+ return fun, args
+
+ def apply_async(self, args, kwargs, **options):
+ fun, real_args = self.unpack_args(*args)
+ return super(PickleTask, self).apply_async(
+ (fun, real_args, kwargs), shadow=qualname(fun), **options
+ )
+
+ @app.task(base=PickleTask)
+ def call(fun, args, kwargs):
+ return fun(*args, **kwargs)
+
+
+.. _message-protocol-task-v1:
.. _task-message-protocol-v1:
-=======================
- Task Messages
-=======================
+Version 1
+=========
-.. contents::
- :local:
+In version 1 of the protocol all fields are stored in the message body,
+which means workers and intermediate consumers must deserialize the payload
+to read the fields.
-Message format
-==============
+Message Body
+~~~~~~~~~~~~
* task
:`string`:
@@ -56,15 +214,6 @@ Message format
will be expired when the message is received and the expiration date
has been exceeded.
-Extensions
-==========
-
-Extensions are additional keys in the message body that the worker may or
-may not support. If the worker finds an extension key it doesn't support
-it should optimally reject the message so another worker gets a chance
-to process it.
-
-
* taskset
:`string`:
@@ -116,9 +265,9 @@ to process it.
Example message
-===============
+~~~~~~~~~~~~~~~
-This is an example invocation of the `celery.task.PingTask` task in JSON
+This is an example invocation of a `celery.task.ping` task in JSON
format:
.. code-block:: javascript
@@ -130,8 +279,8 @@ format:
"retries": 0,
"eta": "2009-11-17T12:30:56.527191"}
-Serialization
-=============
+Task Serialization
+------------------
Several types of serialization formats are supported using the
`content_type` message header.
diff --git a/docs/internals/protov2.rst b/docs/internals/protov2.rst
deleted file mode 100644
index 626c333fb..000000000
--- a/docs/internals/protov2.rst
+++ /dev/null
@@ -1,140 +0,0 @@
-.. _protov2draft:
-
-========================================
- Task Message Protocol v2 (Draft Spec.)
-========================================
-
-Notes
-=====
-
-- Support for multiple languages via the ``lang`` header.
-
- Worker may redirect the message to a worker that supports
- the language.
-
-- Metadata moved to headers.
-
- This means that workers/intermediates can inspect the message
- and make decisions based on the headers without decoding
- the payload (which may be language specific, e.g. serialized by the
- Python specific pickle serializer).
-
-- Body is only for language specific data.
-
- - Python stores args/kwargs and embedded signatures in body.
-
- - If a message uses raw encoding then the raw data
- will be passed as a single argument to the function.
-
- - Java/C, etc. can use a thrift/protobuf document as the body
-
-- Dispatches to actor based on ``task``, ``meth`` headers
-
- ``meth`` is unused by python, but may be used in the future
- to specify class+method pairs.
-
-- Chain gains a dedicated field.
-
- Reducing the chain into a recursive ``callbacks`` argument
- causes problems when the recursion limit is exceeded.
-
- This is fixed in the new message protocol by specifying
- a list of signatures, each task will then pop a task off the list
- when sending the next message::
-
- execute_task(message)
- chain = embed['chain']
- if chain:
- sig = maybe_signature(chain.pop())
- sig.apply_async(chain=chain)
-
-- ``correlation_id`` replaces ``task_id`` field.
-
-- ``root_id`` and ``parent_id`` fields helps keep track of workflows.
-
-- ``shadow`` lets you specify a different name for logs, monitors
- can be used for e.g. meta tasks that calls any function::
-
- from celery.utils.imports import qualname
-
- class PickleTask(Task):
- abstract = True
-
- def unpack_args(self, fun, args=()):
- return fun, args
-
- def apply_async(self, args, kwargs, **options):
- fun, real_args = self.unpack_args(*args)
- return super(PickleTask, self).apply_async(
- (fun, real_args, kwargs), shadow=qualname(fun), **options
- )
-
- @app.task(base=PickleTask)
- def call(fun, args, kwargs):
- return fun(*args, **kwargs)
-
-
-Definition
-==========
-
-.. code-block:: python
-
- # protocol v2 implies UTC=True
- # 'class' header existing means protocol is v2
-
- properties = {
- 'correlation_id': uuid task_id,
- 'content_type': string mimetype,
- 'content_encoding': string encoding,
-
- # optional
- 'reply_to': string queue_or_url,
- }
- headers = {
- 'lang': string 'py'
- 'task': string task,
- 'id': uuid task_id,
- 'root_id': uuid root_id,
- 'parent_id': uuid parent_id,
- 'group': uuid group_id,
-
- # optional
- 'meth': string method_name,
- 'shadow': string alias_name,
- 'eta': iso8601 eta,
- 'expires'; iso8601 expires,
- 'retries': int retries,
- 'timelimit': (soft, hard),
- }
-
- body = (
- object[] args,
- Mapping kwargs,
- Mapping embed {
- 'callbacks': Signature[] callbacks,
- 'errbacks': Signature[] errbacks,
- 'chain': Signature[] chain,
- 'chord': Signature chord_callback,
- }
- )
-
-Example
-=======
-
-.. code-block:: python
-
- # chain: add(add(add(2, 2), 4), 8) == 2 + 2 + 4 + 8
-
- task_id = uuid()
- basic_publish(
- message=json.dumps(([2, 2], {}, None),
- application_headers={
- 'lang': 'py',
- 'task': 'proj.tasks.add',
- }
- properties={
- 'correlation_id': task_id,
- 'content_type': 'application/json',
- 'content_encoding': 'utf-8',
- }
- )
From b3cd3fcec4e6be0baa68b7ec2fc62c53e7d0404d Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Wed, 21 May 2014 16:01:29 +0100
Subject: [PATCH 0168/1103] Tests passing for beat
---
celery/beat.py | 30 +++++++++++++++---------------
celery/tests/app/test_beat.py | 12 +++---------
2 files changed, 18 insertions(+), 24 deletions(-)
diff --git a/celery/beat.py b/celery/beat.py
index 3579ecf4f..d316ac251 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -199,13 +199,6 @@ def install_default_entries(self, data):
'options': {'expires': 12 * 3600}}
self.update_from_dict(entries)
- def maybe_due(self, entry, producer=None):
- is_due, next_time_to_run = entry.is_due()
-
- if is_due:
- self.apply_entry(entry, producer=producer, advance=True)
- return next_time_to_run
-
def apply_entry(self, entry, producer=None):
info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
try:
@@ -216,20 +209,26 @@ def apply_entry(self, entry, producer=None):
else:
debug('%s sent. id->%s', entry.task, result.id)
+ def is_due(self, entry):
+ return entry.is_due()
+
def tick(self, event_t=event_t, min=min,
- heappop=heapq.heappop, heappush=heapq.heappush):
+ heappop=heapq.heappop, heappush=heapq.heappush,
+ heapify=heapq.heapify):
"""Run a tick, that is one iteration of the scheduler.
Executes all due tasks.
"""
+ max_interval = self.max_interval
H = self._heap
if H is None:
- H = self._heap = [event_t(e.is_due()[1], 5, e)
+ H = self._heap = [event_t(e.is_due()[1] or 0, 5, e)
for e in values(self.schedule)]
+ heapify(H)
event = H[0]
entry = event[2]
- is_due, next_time_to_run = entry.is_due()
+ is_due, next_time_to_run = self.is_due(entry)
if is_due:
verify = heappop(H)
if verify is event:
@@ -239,8 +238,8 @@ def tick(self, event_t=event_t, min=min,
return 0
else:
heappush(H, verify)
- return min(verify[0], self.max_interval)
- return min(next_time_to_run, self.max_interval)
+ return min(verify[0], max_interval)
+ return min(next_time_to_run or max_interval, max_interval)
def should_sync(self):
return (
@@ -477,9 +476,10 @@ def start(self, embedded_process=False):
try:
while not self._is_shutdown.is_set():
interval = self.scheduler.tick()
- debug('beat: Waking up %s.',
- humanize_seconds(interval, prefix='in '))
- time.sleep(interval)
+ if interval:
+ debug('beat: Waking up %s.',
+ humanize_seconds(interval, prefix='in '))
+ time.sleep(interval)
except (KeyboardInterrupt, SystemExit):
self._is_shutdown.set()
finally:
diff --git a/celery/tests/app/test_beat.py b/celery/tests/app/test_beat.py
index 62310805a..04a610df0 100644
--- a/celery/tests/app/test_beat.py
+++ b/celery/tests/app/test_beat.py
@@ -123,7 +123,7 @@ def send_task(self, *args, **kwargs):
class mSchedulerRuntimeError(mScheduler):
- def maybe_due(self, *args, **kwargs):
+ def is_due(self, *args, **kwargs):
raise RuntimeError('dict modified while itervalues')
@@ -273,22 +273,16 @@ def test_due_tick(self):
schedule=always_due,
args=(1, 2),
kwargs={'foo': 'bar'})
- self.assertEqual(scheduler.tick(), 1)
+ self.assertEqual(scheduler.tick(), 0)
@patch('celery.beat.error')
def test_due_tick_SchedulingError(self, error):
scheduler = mSchedulerSchedulingError(app=self.app)
scheduler.add(name='test_due_tick_SchedulingError',
schedule=always_due)
- self.assertEqual(scheduler.tick(), 1)
+ self.assertEqual(scheduler.tick(), 0)
self.assertTrue(error.called)
- def test_due_tick_RuntimeError(self):
- scheduler = mSchedulerRuntimeError(app=self.app)
- scheduler.add(name='test_due_tick_RuntimeError',
- schedule=always_due)
- self.assertEqual(scheduler.tick(), scheduler.max_interval)
-
def test_pending_tick(self):
scheduler = mScheduler(app=self.app)
scheduler.add(name='test_pending_tick',
From b21189f0f6516f6b162a5a1e8cd8ff9ca16e5554 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 22 May 2014 13:14:05 +0100
Subject: [PATCH 0169/1103] Enables universal wheel
---
setup.cfg | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/setup.cfg b/setup.cfg
index 2a032e4d1..68aa7cdd1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -13,3 +13,9 @@ upload-dir = docs/.build/html
requires = pytz >= 2011b
billiard >= 3.3.0.17
kombu >= 3.0.15
+
+[wheel]
+universal = 1
+
+[egg_info]
+tag_date = true
From bc31507d26267c9bbf93f4eb6470a0bc3f141ba8 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 22 May 2014 13:21:39 +0100
Subject: [PATCH 0170/1103] setup.py cleanup
---
setup.py | 46 +++++++++++++++++-----------------------------
1 file changed, 17 insertions(+), 29 deletions(-)
diff --git a/setup.py b/setup.py
index 2767346f0..6a8a370c2 100644
--- a/setup.py
+++ b/setup.py
@@ -1,17 +1,7 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-try:
- from setuptools import setup, find_packages
- from setuptools.command.test import test
- is_setuptools = True
-except ImportError:
- raise
- from ez_setup import use_setuptools
- use_setuptools()
- from setuptools import setup, find_packages # noqa
- from setuptools.command.test import test # noqa
- is_setuptools = False
+from setuptools import setup, find_packages
import os
import sys
@@ -22,6 +12,8 @@
if sys.version_info < (2, 7):
raise Exception('Celery 3.2 requires Python 2.7 or higher.')
+# -*- Upgrading from older versions -*-
+
downgrade_packages = [
'celery.app.task',
]
@@ -53,6 +45,9 @@
finally:
sys.path[:] = orig_path
+PY3 = sys.version_info[0] == 3
+JYTHON = sys.platform.startswith('java')
+PYPY = hasattr(sys, 'pypy_version_info')
NAME = 'celery'
entrypoints = {}
@@ -75,16 +70,9 @@
Programming Language :: Python :: Implementation :: PyPy
Programming Language :: Python :: Implementation :: Jython
Operating System :: OS Independent
- Operating System :: POSIX
- Operating System :: Microsoft :: Windows
- Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
-PY3 = sys.version_info[0] == 3
-JYTHON = sys.platform.startswith('java')
-PYPY = hasattr(sys, 'pypy_version_info')
-
# -*- Distribution Meta -*-
import re
@@ -123,8 +111,6 @@ def add_doc(m):
# -*- Installation Requires -*-
-py_version = sys.version_info
-
def strip_comments(l):
return l.split('#', 1)[0].strip()
@@ -165,15 +151,16 @@ def reqs(*f):
'celeryd-multi = celery.__main__:_compat_multi',
])
-if is_setuptools:
- extras = lambda *p: reqs('extras', *p)
- # Celery specific
- specific_list = ['auth', 'cassandra', 'memcache', 'couchbase', 'threads',
- 'eventlet', 'gevent', 'msgpack', 'yaml', 'redis',
- 'mongodb', 'sqs', 'couchdb', 'beanstalk', 'zookeeper',
- 'zeromq', 'sqlalchemy', 'librabbitmq', 'pyro', 'slmq']
- extras_require = dict((x, extras(x + '.txt')) for x in specific_list)
- extra['extras_require'] = extras_require
+# -*- Extras -*-
+
+extras = lambda *p: reqs('extras', *p)
+# Celery specific
+specific_list = ['auth', 'cassandra', 'memcache', 'couchbase', 'threads',
+ 'eventlet', 'gevent', 'msgpack', 'yaml', 'redis',
+ 'mongodb', 'sqs', 'couchdb', 'beanstalk', 'zookeeper',
+ 'zeromq', 'sqlalchemy', 'librabbitmq', 'pyro', 'slmq']
+extras_require = dict((x, extras(x + '.txt')) for x in specific_list)
+extra['extras_require'] = extras_require
# -*- %%% -*-
@@ -187,6 +174,7 @@ def reqs(*f):
platforms=['any'],
license='BSD',
packages=find_packages(exclude=['ez_setup', 'tests', 'tests.*']),
+ include_package_data=False,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
From 2a5a1409558950f1358b052a6f5b4e01884d8d67 Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 22 May 2014 13:49:23 +0100
Subject: [PATCH 0171/1103] Tox: Adds docs target
---
tox.ini | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/tox.ini b/tox.ini
index a15e36635..9cfbd75ce 100644
--- a/tox.ini
+++ b/tox.ini
@@ -49,3 +49,9 @@ setenv = C_DEBUG_TEST = 1
commands = {toxinidir}/extra/release/removepyc.sh {toxinidir}
pip install -U -r{toxinidir}/requirements/dev.txt
nosetests -xsv --with-coverage --cover-inclusive --cover-erase []
+
+[testenv:docs]
+deps = -r{toxinidir}/requirements/docs.txt
+commands =
+ pip install -U -r{toxinidir}/requirements/dev.txt
+ sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees docs docs/_build/linkcheck
From 506a0817ac553349715c371ffc826dd6aadbecbd Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 22 May 2014 13:49:31 +0100
Subject: [PATCH 0172/1103] Fixes doc problems
---
Changelog | 2 +-
docs/.templates/page.html | 2 +-
docs/getting-started/brokers/redis.rst | 91 ++++++++++++++------------
docs/internals/protocol.rst | 4 +-
docs/reference/celery.rst | 6 +-
docs/userguide/signals.rst | 10 +--
6 files changed, 60 insertions(+), 55 deletions(-)
diff --git a/Changelog b/Changelog
index 7c7e35165..c523701e4 100644
--- a/Changelog
+++ b/Changelog
@@ -109,7 +109,7 @@ new in Celery 3.1.
Celery), so if you do enable it then make sure you do so on all
nodes.
- See :ref:`redis-caveats-fanout-patterns`.
+ See :ref:`redis-caveat-fanout-patterns`.
This will be the default in Celery 3.2.
diff --git a/docs/.templates/page.html b/docs/.templates/page.html
index e4d1c2132..7562de304 100644
--- a/docs/.templates/page.html
+++ b/docs/.templates/page.html
@@ -7,7 +7,7 @@
This document is for Celery's development version, which can be
significantly different from previous releases. Get old docs here:
- 3.0.
+ 3.1.
{% else %}
diff --git a/docs/getting-started/brokers/redis.rst b/docs/getting-started/brokers/redis.rst
index 6a1d6e31f..543f4ee90 100644
--- a/docs/getting-started/brokers/redis.rst
+++ b/docs/getting-started/brokers/redis.rst
@@ -69,72 +69,77 @@ Caveats
.. _redis-caveat-fanout-prefix:
-- Broadcast messages will be seen by all virtual hosts by default.
+Fanout prefix
+-------------
- You have to set a transport option to prefix the messages so that
- they will only be received by the active virtual host::
+Broadcast messages will be seen by all virtual hosts by default.
- BROKER_TRANSPORT_OPTIONS = {'fanout_prefix': True}
+You have to set a transport option to prefix the messages so that
+they will only be received by the active virtual host::
- Note that you will not be able to communicate with workers running older
- versions or workers that does not have this setting enabled.
+ BROKER_TRANSPORT_OPTIONS = {'fanout_prefix': True}
- This setting will be the default in the future, so better to migrate
- sooner rather than later.
+Note that you will not be able to communicate with workers running older
+versions or workers that do not have this setting enabled.
+
+This setting will be the default in the future, so better to migrate
+sooner rather than later.
.. _redis-caveat-fanout-patterns:
-- Workers will receive all task related events by default.
+Fanout patterns
+---------------
- To avoid this you must set the ``fanout_patterns`` fanout option so that
- the workers may only subscribe to worker related events::
+Workers will receive all task related events by default.
- BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True}
+To avoid this you must set the ``fanout_patterns`` fanout option so that
+the workers may only subscribe to worker related events::
- Note that this change is backward incompatible so all workers in the
- cluster must have this option enabled, or else they will not be able to
- communicate.
+ BROKER_TRANSPORT_OPTIONS = {'fanout_patterns': True}
- This option will be enabled by default in the future.
+Note that this change is backward incompatible so all workers in the
+cluster must have this option enabled, or else they will not be able to
+communicate.
-- If a task is not acknowledged within the :ref:`redis-visibility_timeout`
- the task will be redelivered to another worker and executed.
+This option will be enabled by default in the future.
- This causes problems with ETA/countdown/retry tasks where the
- time to execute exceeds the visibility timeout; in fact if that
- happens it will be executed again, and again in a loop.
+Visibility timeout
+------------------
- So you have to increase the visibility timeout to match
- the time of the longest ETA you are planning to use.
+If a task is not acknowledged within the :ref:`redis-visibility_timeout`
+the task will be redelivered to another worker and executed.
- Note that Celery will redeliver messages at worker shutdown,
- so having a long visibility timeout will only delay the redelivery
- of 'lost' tasks in the event of a power failure or forcefully terminated
- workers.
+This causes problems with ETA/countdown/retry tasks where the
+time to execute exceeds the visibility timeout; in fact if that
+happens it will be executed again, and again in a loop.
- Periodic tasks will not be affected by the visibility timeout,
- as this is a concept separate from ETA/countdown.
+So you have to increase the visibility timeout to match
+the time of the longest ETA you are planning to use.
- You can increase this timeout by configuring a transport option
- with the same name::
+Note that Celery will redeliver messages at worker shutdown,
+so having a long visibility timeout will only delay the redelivery
+of 'lost' tasks in the event of a power failure or forcefully terminated
+workers.
- BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 43200}
+Periodic tasks will not be affected by the visibility timeout,
+as this is a concept separate from ETA/countdown.
- The value must be an int describing the number of seconds.
+You can increase this timeout by configuring a transport option
+with the same name::
+ BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 43200}
-- Monitoring events (as used by flower and other tools) are global
- and is not affected by the virtual host setting.
+The value must be an int describing the number of seconds.
- This is caused by a limitation in Redis. The Redis PUB/SUB channels
- are global and not affected by the database number.
+Key eviction
+------------
-- Redis may evict keys from the database in some situations
+Redis may evict keys from the database in some situations
- If you experience an error like::
+If you experience an error like::
- InconsistencyError, Probably the key ('_kombu.binding.celery') has been
- removed from the Redis database.
+ InconsistencyError, Probably the key ('_kombu.binding.celery') has been
+ removed from the Redis database.
- you may want to configure the redis-server to not evict keys by setting
- the ``timeout`` parameter to 0.
+you may want to configure the redis-server to not evict keys by setting
+the ``timeout`` parameter to 0 in the redis configuration file.
diff --git a/docs/internals/protocol.rst b/docs/internals/protocol.rst
index 9a12ba2e1..c51aa396f 100644
--- a/docs/internals/protocol.rst
+++ b/docs/internals/protocol.rst
@@ -161,13 +161,13 @@ Changes from version 1
.. _task-message-protocol-v1:
Version 1
-=========
+---------
In version 1 of the protocol all fields are stored in the message body,
which means workers and intermediate consumers must deserialize the payload
to read the fields.
-Message Body
+Message body
~~~~~~~~~~~~
* task
diff --git a/docs/reference/celery.rst b/docs/reference/celery.rst
index 0363c446b..75541f059 100644
--- a/docs/reference/celery.rst
+++ b/docs/reference/celery.rst
@@ -382,15 +382,15 @@ and creating Celery applications.
Finalizes the app by loading built-in tasks,
and evaluating pending task decorators
- .. signal:: on_configure
+ .. data:: on_configure
Signal sent when app is loading configuration.
- .. signal:: on_after_configure
+ .. data:: on_after_configure
Signal sent after app has prepared the configuration.
- .. signal:: on_after_finalize
+ .. data:: on_after_finalize
Signal sent after app has been finalized.
diff --git a/docs/userguide/signals.rst b/docs/userguide/signals.rst
index 00dab2dd9..fd6dae378 100644
--- a/docs/userguide/signals.rst
+++ b/docs/userguide/signals.rst
@@ -88,7 +88,7 @@ Provides arguements:
Task message body.
This is a mapping containing the task message fields
- (see :ref:`task-message-protocol-v1`).
+ (see :ref:`message-protocol-task-v1`).
* exchange
@@ -131,14 +131,14 @@ Provides arguments:
* headers
- The task message headers, see :ref:`task-message-protocol-v2`
- and :ref:`task-message-protocol-v1`.
+ The task message headers, see :ref:`message-protocol-task-v2`
+ and :ref:`message-protocol-task-v1`.
for a reference of possible fields that can be defined.
* body
- The task message body, see :ref:`task-message-protocol-v2`
- and :ref:`task-message-protocol-v1`.
+ The task message body, see :ref:`message-protocol-task-v2`
+ and :ref:`message-protocol-task-v1`.
for a reference of possible fields that can be defined.
* exchange
From 100168871a3bc4607bc07b11d7de6d7c72e8cc4b Mon Sep 17 00:00:00 2001
From: Ask Solem
Date: Thu, 22 May 2014 14:44:09 +0100
Subject: [PATCH 0173/1103] Fixes docs and test issues
---
README.rst | 4 ++--
celery/app/trace.py | 4 +++-
celery/beat.py | 2 +-
celery/tests/case.py | 8 +++++---
celery/tests/worker/test_loops.py | 2 +-
celery/tests/worker/test_request.py | 1 +
celery/tests/worker/test_worker.py | 2 +-
celery/worker/request.py | 4 ++--
docs/configuration.rst | 8 ++++++++
docs/images/worker_graph_full.png | Bin 101018 -> 107927 bytes
10 files changed, 24 insertions(+), 11 deletions(-)
diff --git a/README.rst b/README.rst
index 8e349b866..392965271 100644
--- a/README.rst
+++ b/README.rst
@@ -234,9 +234,9 @@ by using brackets. Multiple bundles can be specified by separating them by
commas.
::
- $ pip install celery[librabbitmq]
+ $ pip install "celery[librabbitmq]"
- $ pip install celery[librabbitmq,redis,auth,msgpack]
+ $ pip install "celery[librabbitmq,redis,auth,msgpack]"
The following bundles are available:
diff --git a/celery/app/trace.py b/celery/app/trace.py
index c26961cde..3e04628a2 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -491,9 +491,11 @@ def _fast_trace_task(task, uuid, request, body, content_type,
embed = None
tasks, accept, hostname = _loc
if content_type:
- args, kwargs, embed = loads(
+ X = loads(
body, content_type, content_encoding, accept=accept,
)
+ print(X)
+ args, kwargs, embed = X
else:
args, kwargs = body
request.update({
diff --git a/celery/beat.py b/celery/beat.py
index d316ac251..372441221 100644
--- a/celery/beat.py
+++ b/celery/beat.py
@@ -478,7 +478,7 @@ def start(self, embedded_process=False):
interval = self.scheduler.tick()
if interval:
debug('beat: Waking up %s.',
- humanize_seconds(interval, prefix='in '))
+ humanize_seconds(interval, prefix='in '))
time.sleep(interval)
except (KeyboardInterrupt, SystemExit):
self._is_shutdown.set()
diff --git a/celery/tests/case.py b/celery/tests/case.py
index 551d0dfbb..a05c8c5a1 100644
--- a/celery/tests/case.py
+++ b/celery/tests/case.py
@@ -869,7 +869,8 @@ def restore_logging():
root.handlers[:] = handlers
-def TaskMessage(name, id=None, args=(), kwargs={}, **options):
+def TaskMessage(name, id=None, args=(), kwargs={}, callbacks=None,
+ errbacks=None, chain=None, **options):
from celery import uuid
from kombu.serialization import dumps
id = id or uuid()
@@ -878,9 +879,10 @@ def TaskMessage(name, id=None, args=(), kwargs={}, **options):
'id': id,
'task': name,
}
+ embed = {'callbacks': callbacks, 'errbacks': errbacks, 'chain': chain}
message.headers.update(options)
message.content_type, message.content_encoding, message.body = dumps(
- (args, kwargs), serializer='json',
+ (args, kwargs, embed), serializer='json',
)
- message.payload = (args, kwargs)
+ message.payload = (args, kwargs, embed)
return message
diff --git a/celery/tests/worker/test_loops.py b/celery/tests/worker/test_loops.py
index 4030782f4..4473eb47e 100644
--- a/celery/tests/worker/test_loops.py
+++ b/celery/tests/worker/test_loops.py
@@ -158,7 +158,7 @@ def test_on_task_message_missing_name(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
msg.headers.pop('task')
on_task(msg)
- x.on_unknown_message.assert_called_with(((2, 2), {}), msg)
+ x.on_unknown_message.assert_called_with(msg.payload, msg)
def test_on_task_not_registered(self):
x, on_task, msg, strategy = self.task_context(self.add.s(2, 2))
diff --git a/celery/tests/worker/test_request.py b/celery/tests/worker/test_request.py
index 2700d26ef..7a202fa72 100644
--- a/celery/tests/worker/test_request.py
+++ b/celery/tests/worker/test_request.py
@@ -619,6 +619,7 @@ def test_fast_trace_task(self):
self.assertIs(trace.trace_task_ret, trace._fast_trace_task)
tid = uuid()
message = TaskMessage(self.mytask.name, tid, args=[4])
+ assert len(message.payload) == 3
try:
self.mytask.__trace__ = build_tracer(
self.mytask.name, self.mytask, self.app.loader, 'test',
diff --git a/celery/tests/worker/test_worker.py b/celery/tests/worker/test_worker.py
index 5ac5f6a9a..ebf4425c6 100644
--- a/celery/tests/worker/test_worker.py
+++ b/celery/tests/worker/test_worker.py
@@ -912,7 +912,7 @@ def Loader(*args, **kwargs):
os.environ['FORKED_BY_MULTIPROCESSING'] = "1"
try:
process_initializer(app, 'luke.worker.com')
- S.assert_called_with(app)
+ S.assert_called_with(app, 'luke.worker.com')
finally:
os.environ.pop('FORKED_BY_MULTIPROCESSING', None)
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 7193a9013..c03b42d54 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -207,10 +207,10 @@ def execute(self, loglevel=None, logfile=None):
self.acknowledge()
request = self.request_dict
- args, kwargs = self.message.payload
+ args, kwargs, embed = self.message.payload
request.update({'loglevel': loglevel, 'logfile': logfile,
'hostname': self.hostname, 'is_eager': False,
- 'args': args, 'kwargs': kwargs})
+ 'args': args, 'kwargs': kwargs}, **embed or {})
retval = trace_task(self.task, self.id, args, kwargs, request,
hostname=self.hostname, loader=self.app.loader,
app=self.app)[0]
diff --git a/docs/configuration.rst b/docs/configuration.rst
index 864b255dd..2aa37164e 100644
--- a/docs/configuration.rst
+++ b/docs/configuration.rst
@@ -1110,6 +1110,14 @@ compression schemes registered in the Kombu compression registry.
The default is to send uncompressed messages.
+.. setting:: CELERY_TASK_PROTOCOL
+
+CELERY_TASK_PROTOCOL
+~~~~~~~~~~~~~~~~~~~~
+
+Default task message protocol version.
+Supports protocols: 1 and 2 (default is 1 for backwards compatibility).
+
.. setting:: CELERY_TASK_RESULT_EXPIRES
CELERY_TASK_RESULT_EXPIRES
diff --git a/docs/images/worker_graph_full.png b/docs/images/worker_graph_full.png
index 867bcfb7429cfb6bd1611dacbda6a124d78829a9..38cb75c902b5f9076ba54adc12d5ff387e1bd66c 100644
GIT binary patch
literal 107927
zcmY(rWmH^E6D{ERa~Ku
zGT}Up)SX^q+)q1P%o`V0JQ_PzbQl@taM>lGVBUR-7m!#-A%O;c<^T(^lAz+>>dsi&
zUf!%kG{mcQ+^#IXzEs_qPqVVP+wOFvIy`Aw?sPn!wB34$r(OQ#?JG4baS7E(fx^%S
z$@n5LK~r>tQ+x&CIq;1#cx2u_{NG=Q5o$ogMBxADOJOv9*cpZ4z5wI@yBrF`&KL|l
z%uIOA!~id|Eml@o_{gLR+2CY)1>V)eZGzI-hLn=hBOUKw2(V)#@Yr%T#giBWcub)1bwkIUT~S96A|9O
ze?M`_=uHe!XP3(EO3BF7QR#$n=?5Q02)Lcg+B-QEQW7qcR8&{AFU%fXSf%)~f#=}V
z@T-=}VK7`z1AeJ!&q{>tUuwEJt^I?RU;YXjmAzI`R+K{V{LUg89FpdY*8CkL)4dKH
z^ze)b`dto%AMNbL-qt=XW_G+_maeXa^QhMs=r*#k#YtK6d(H;WeIBh=*Q({w7|P#(
zIGw>7e$&zPw@3Qm1N^l8M)q4}4M#fIATf@)BezzWZXS?ufUpY0CDT9PX@5lrf3z({
zn+2L;?F?AwbQc2!qYxmV=flr?+xtkWKOC`vJ~6cy|ELdgmd&Z)$aZhz-mi~2{>-la
zp}*#^Z)fleG%`FOQfQTviF5e&=M|DTx17Xy=AF0%7PwrF6-r-Is}Fuf4_amAG{tbI
zT>(PEv2MOWZ@SkfgjUlZ-R!GO?FNH(8Mnt;E@!DpSd}&Oc_}k?VE~;jQH(85ixvj4
zn~jzQHQ0GS9AiAaDT6})-P=;%KH1c6R}gC>^3@JE95pG0AIP86-<^lZ`yW^g1NY;n
zf{iCjOu~;cw^pl6513shZ&s5c3TU$4?uQ|TUAlm+9ue>DhRwyzO<_vl)*|H{7GMJj
z=v1^r`F<;esi)I@L0#SyLBHWrp)i=jpk86WAYib62Vo$z-`@yYm>=f^UeU>Q@}Z#+
z8GsDozs{&B>o}3~K8>-(xe@D+aC-lGvuLJdDB7AG^vxR?0aOmJ8ycN<&j9qvz}0f@
zuVF30(2Bs;q*w0)Jp{8Mo
zdx8-bi@rnxT!R7~_tU4GjU`J=3<9ym({Jpx(7;K6TWSaZw?xW0Cy$oSX)8|7!tt)=
z>wLi^NZ$HM?7Ab~0+B+y;MByTfefZdlC?!4F^K}K9(KPNlGofU5(N~|2#>F~
zMFr;LvyZm7O@i;5rL+IxNEm*y4tp0paW7Jly2nu5~@q7y|LXa?75+Z(|5Tot{6HpO4?%rwfM*ATo{*$;i8w2|_vkjnmD2$p=
zU?H}_X2emZfQLBc^L^nmCN8=Z+W3zfKAkKgj1*SCjr94glLI;>6YF#*y(Q>3k?2zZ
zBKa#4`j7jQUoJ^rvhH*2yLX}JO3VZ{FFXbm`V4*t;pi6D7N~uQ&hatL$t}qa9{*i%
z{>{?x@$~dh!8bmh;o;#l#>FXVnME=4%-fa;Th{f2;+AgZ@Og?Pu^4fC1HdVI7@p)dlg-3>EEBX#dE24e3{XgPQBO5NQg_H>bcB5^>x9()#>RJ!4?iT$
z8(gRVQ1>4n9SLC-!YXI&V#suV0$J0`s_3YR+@37Rd>LQg`8H(zpO=&YUZRE-xfB+M
zu(gFXo=dh%{e?B5-P0496N^&GsxkpZ?+01R6P@DAwY1~iujY~lrsUrI@6-CKb9Scls!ohyV3mwzrHF2^9J
zre?*5_Pz%9;SX`+>rKY^vv&`0Lx(r*FFp{;lNh)VPNbx9O2T_kON+-Ct%@Eftktdp$E=KVYx#hhfpI0xh8F}vK_f9=1qb(5S
z_)MBMwlkK+zHj?-jRW~&8)oHly@YUac9m!NxRQv1ESV=``!})v>V@`Ix>415qm!RP
z<35K+n2UR{w53)zvO~O+{o02JQS@Q@^||HBGOO8<)hWW8?_~{{ivn|@+UJ&^n%*+*
zsI0Uz2}p_6s5_iK^B;Q*!W0e|C-A=944X!eCwE>8uV;RFijDQ!f3KRH{Qi+3!q8!d
zG1p|h+(W<)9AW!%Hj7CH(QCt8w}9E6o}G;#bYGKe%OI71OR$E3vCZHf{?4EB?Ot0W
z=6&6O6C!aazAJD37-NTX8X;yn!=YCb%7w{`W$;jOyLsfK!Q^I@eO>Si8P5*0>)PbJQ
z>8^iBOnZA>L#X%NXjcLM%f{o?vtAcz
z)tZ{Gx9^#t$vg;LY_a}D+TI*5>5f`!X>$jBeY
zg?;X?LNCG{=Ga11uR`ZqLn;;;GS|dCZua=@315X<%Ql`qX|*{O^Sl$ip6k2~WPTb%
z+s2~Ikyw5Gom^U)n^dLCLA^-R@8WsDD7LcM7tfl$?V%xJN()>sy#jbeF%1>uOGjWVigv)!X3ne2zB*}?wVZ2dw)dby1yGWnO|
zw{zLmYP4MwwtAmK8c1VoY*DJUMRGx4q?Ow+SxhW63l!xI&NqCf53=%OJ=-`Rqo=f^
zf21JbcazjY;q3^fcSyQ5d8AD|wvGfdz~-;Y<1UwAYGd`mDk(lzpFUp)njkw(PmQn8
z#5}^ii=czE+5>1~wh6PP9tghk(^FVmQ-Ug|`@L*<(V%E^a&qCRzH66#5!Lg`{`D)>lWw)LzAq!M
z-6?j!zu=I60DA0rb7Eb!_+0~Zzn2&2_1o6;Z$U!Xv`X^x5yTwjrL}Tex(04b&j&<<
z)7BcP{><8{x-t@tth%TJu;qJu#N>st@-E3s{vj8Lnho{=OLF{VOYnJ{@g-}2R38qS
zUxm%(6n}(WTu-oSoAnE46L3c3D7@rQwYH&XYxHMK?a2lX5JY?-#pt~&vw)xR&yf{z
zZ1+UYo*x8TE6xci>W;sg$9Z^gA3Vl3+~xwi2u7;F3*$Yi@DB$icD9u#bl$bdN8Dei
z|53@f;{fD8L*C6-GqXMFQvb!z#hIw6^?_oYizy}}^JsH3X5fYU#4AvV1t;9-6=`7h
zpI)oj=ZFTmyJQGo@=WMgwc*)ilKmK!L-bSZ&|^;xoOKv3t(>dpWxbO;z2~!2@eggS
zEoe)yYhu15AO%F6WvR>{v7S7CO{?!^5}Mb9Q7+KFLeJA7Y=aPGGJ>@8?*oxdQp(0w
z9xJYLoS%I@e0U$~sOf{PrXn9Fs81afD6S|Y6h0txMo1FTYu4g=z%(5y&>TdXIb-H$
zUwsEh+iy_H=e_22fn(l4;h?RsfVWRclS@K%*7Mk`?0BAjhDWHK_z
zpR)NyB-@Uq)~i#KJ9J(}wR`=Ut6s|vr}q*?WsQ-6K3mO@OsZTKI)i)u_3uTL%EI`o
z={mCoI;TR6=T)?ffzvG|5z%CyR$LU+bJydqTA-oEMTGp!JtU)zaCB*A{-|6r4dxQ*XwPZ-ULjY|1jQ25lhqJfKR7o|TQ)k9O?6b2Nd
zv9%u+bkRI-*7Q7OBW>rLHsTn)1G4V_&hEkaTxzDGzsFYDiV^**PF2uG`0|vZ-AfZy
z@m}Gqn{dAqbr06~mPPN8a^L>T4hW{xFJ71CNmsopMLq)C2-of%j}ZW&j#2?2eAC{_
z(~Jgcbc(wAvk2Fh^dsZ!-+O#cn`*|F&Fn06?G=V!J>Qx?85!5Ytjj?~e9YU%y5B+Z
zCco(BBMV325kGCetKj*BPAIAO^5~QACap$0y7g}>(rrQ&$_CN0>Afit?y~;N1S>Pj
z!DZLOm**SGS1S3QAcMZ(HhA;ak;K>>jCZgCG~XZ7d%sXvr(eN{`KpCxJIKmlBb)C
z*Td!2#ikx}I>|7#TZv?l&L57KR%y3aZ2
z^RD;jt3GjB>G*rA-nYS5$CagYSXiB&V3tQWCK~#mf|Yr8zh}E{sDi*K+CBXaYq$T^
z0@~;)ceC?J`Rg7m04LCX|5wn$#wK}jGrFi9w0jo`-dv>}$ej`sDNlYFT@hI=Uef{
z2Yutm`ttGOJq5|w0ChHz$Ym_N`*M;GtjoAU4Xe>2bO3Ah!m$
zP39pP`MP^oSE&CLV?b$ZiAZi^hzr6>Y`l7b6BntPFuF)5(TO*p=z}o^eMH0cUA0_s
z3Xh1&v398#-(y&!e-n~`*V~@|ARY~9epM%g?k7RFgR}e+?`-DHa6Ppt^l?E0w+A8t
zS9irOG7`-uDUxRb!2RqbfK^;ub&I&G5lY*d;Dlj-|
zW&eZ3eM{M;zC0>%{_+@5zCsKTcMfW|vaAHw3f1}ZR$c_9FbltH0zai>3CN04H-2Ru
z+l)E-tVeXZkAJE8R_*ylzp;y(ee1ch@@w6>EGh~b6i>SK1AU3*r1J6+|G2pPZf3B{
zsOSOdR}~fnnu+)KT$%z#y7pFw-hsFd&XB@~%vb%CG1F6|0Kz6Z+;4XjEJHfBIx9_n
zKTAzUAIDayXEDLL1W*~!6gj4V1`gbuT&)s-!uhlSNB6dU{%`+Cb`yyD%
zW+yfz1fTb5O{W>bj6p_+s}!OK0P{7nYz@SpDHkb!l_Ce+71q%HU4j%Q<<;nFgcj6`~m*-Zap5k~w4
zd~6x9woNCOMliM7EXL{FH>Ce_pa(LboO_uG6#t6gmo($-exAsSiB2e(8}~_&7lD-T
zH`oRPwTexY)NHtaF@f)TklU7MSN%U<;C?FvGwBL=L5&_XKk2>D%!Io8&6-Ne1
z56){sP`?8g(w72Njv}k5s>x)x+tP}P2g}Y!%LD4@21m*OMoLzlMG(``UI?+bbSNpR
zOtUy>rRLMT>u|rHJCAg;kkP*ZyL)()aPWs7kB`4h`HL0n1SMVmkD>%~E%-h7kU=jToH$
zA^e|q(cZNCTeZIaOY?1$qTHxcX*(A;r?R~S4LMx#q=V0r@bYmLugCyOQ7fSv)PXk=F;|7
zuC|HAQ!kmu>nO+MFE-cC9W*nWiekcI3#}&F_;);Q|3AUp4Sp{HNC$=?
zZ9L@LxE2Fl)oe*qWw5`OkeSYeSU8dlYGY%@))a%@XnsO*wSxU0o{Oss<_AZ&Cm8`{
z5bv^%3XN1B!?!FEL1-Jty&Z4Fn-94yahFtlOp22p4!i$pUHh#;ptePD)(Jn-s^yfL
z#)VJKMI78zWSbAaTH9H9jVychp#4Zq$CczS^ot-CT|N~dMNZ$$Ztj$oV<7va?NH9%
z!Kv_1)w@<*uf%990t%lPX$W2%xeFTvPGr(!H|f7^p3&_Na1RJ3PS0G8ZUmqhK(l4l
z3d-vF4Gn)Iz6l8G_Fxpv%&y9J-CYk)%`eOEYf1+mcGrkH3kmbRMTyS}S}Ef6$gEe_
z)qGO9Bz}e^4L1#ucS#;?j|ysAo>B;s`1AX?lPRnJ+0=V6KxzT>DRIHt6Dpsv)VJ*W
z)OOlSU@`abzHVdcdU-Hr@+S+|hLvue>~k7(^Ys9pswl5csS3fIFw9<&Ln=aW6cLL#
zWK}ngEl5shHx?a9#?}POyaimqVx@qm=T~J!_v_4S2`1ArN<>%+DWLakd|cz4;VO9V
z6B=C7j>oBItKcR}zU?_xma&n&gO2*{aDX#w>*P?V(3E?mLu>km(EBgur&B@JmgTQ=
z{ExC(u|_Tj=0pWI3oq3Dco}8EZ29t`8iL_N7kLLqHXrxWpK--+tt)2_
zXBmn-nrxZ&oin`%CK_{cNV0i!I>hMeDhAR5WAK5@k;C#9>#PJLc`_cNxms1+bl;iw
zf&Yy6?)52aeQSGY+cWXXdYYY{mHhHj3&Ca!$$K`8jO;cs3`sl|IajnevdST^9j2!L
z`pd+yC+eJ&%7!lQafi2Lh~sA0hw6zF#!U3-fcmjb%PaLl_42
zasDf%=Sk%3oG=VkTM=V#I(eqp3+28wSE2T1=A<
zRSy>mR1+^i-1`f#RemCL6JbjU8HxwUWH!
z#*gS;&tKHPxgA@z=cYNS>T1l$ma#GCH~Yj3BA}h+zJoT7EUI|$082c2OSm`9MQQ)w
z5vhkdgxrF90~vb;G2c=BRG_#xbSQ1BmNp^Qz;3gbI;_}%Y0k$dR-HJ{Bb!=W6~zv!`=UL7Zfv5o*)g9e`?#>HT{%SGhd7EEI1P+Dn
zHHGeHN6zRBI#T6r{u&OeHC;DIS<+(SsZ9H)3$&iuUOQiopN~YcG-NCsGE00I>LV~;
zEFj9)6?e(T&mI7yHjX
zbK?!-B0{I4$y_=HwMliz7jcTBx;aiKjr
z4n!b5sIA*o6!cQ5Pz9N&fu|6%3ngr7M2_?}+JpDxR`rZ5TP)jZVy6pn$M_j8fbKti
zUHDrV-9DAgg>}%kYkwybElILiDtZ##K=3zP3pMis5jNk#-Y-R1)%K=vI5P5dc+m2h
z$9p18Wj8Jw?ksj7bIrWv>1z%A3pqQS0j#)X^)iMb#_CaX7bS?^RZ5FW17wgmw)I8}
z@f;yhEn}|RHQItu*xcDRML>fJ8#Bx-(4_88)zy$Tl&1`6#6N1G){EUT2iW
zIY{8M2C8j1)#H5}lBreoSKmJ8PK60O>z}s23;Ng~P0?o|JCpCBFGW
zEa_0jm4cZbz5iHj{Lu;LZA~79*``BC2o%uIJW$e=kDN)YjF!x`%=nR6)ksN!ZmSI&waPlhbiT?_(Or${QIJ^w$?H!UrH)c@LQ3M!#VxQtSsM(pj6I4C2-e
zM3<)AOvM@Od~sZjq~;s(Pfz=hcRX$tXm#a-#~0m{Y?1R_lJCGu+~!i1TO++>pp*8i47#n!%2HKkz$
zB#hR%rU`#Yg@5-B&IoE?DD{VWBRY`qG69~jVw#3?m851KVmVY$_s)RiURJ!YOW-I+;h^o7L+@z14V
zYBWe=wfsQA`9y5g$HZQU1JU)IQ6wh)%mU#*h2oL|@dk(x?`DEuZ!bw{2R-I(C2`1%
zC={0>#aMTf{|f-qZ$`wWwCNO@k$QH2h~vgq>y?HJp~F(Y(=nO-me3+rxIqKr$eF4$
z6NWm3N`}6}Aw%TpWmihaHy@*2fh>dC;NR$F5R%FSMevLVq+SQJL*%tk~N+6An-2{KU
zif->ZlU3!uJv0&y!iTOiOraUjb1%HeQ>m$~*RKM51Px4S0eE9z?JGU~$WFAZ?Qw2J
zmGpwd^hPf9y!6UHZ|%Bt48B=Z8z%S7!Ob_=?GBP75bhA3WM8kp5zF~m8v8dXD}J%u
zGJ#EksAqUXx>MC^l+?`9d#V>f6!9p^k8mfT;1$S)QZr(Ix9o^t$?P3|6eAC&v-XEC
zL*H5OH=HRZoH`Yp87c-h&3JOOKk^LU%nWiVXv%%*5D+p#3bEl`@;rfN?m1w$WBPB|Ul|
zrvKJ5pym6X^3#^_2T_}0pvP$hP%PYdSlrxdBjm)RHRW|Ix0{RaP`X3-INsTyAo~3$JN<;0?ssTx2)Az`
z=+y}GMnDZzkZkxR6Y44Ebg)&=WJVws2lpoQ+vNWev-De~L(LiRE#y56(drWAGhLfI
z_qH=59kG(%h6=lyrl^&RA+o}0s{g^(AnEhlw~k3;zTEMpsk$Ko!4H)
z=?{NT)n%smuBH2E=v)4209;Jpm*D~w2LN-!QA7nBZdm?ASz@{6!;6IDKVv>usO9la
z%vI`7CrNzC