Commit 73d89df3 authored by Julien Muchembled's avatar Julien Muchembled

CMFActivity: optimization, cleanup, limit insertion by size in bytes instead of number of rows

parents 7fb53e8f 3ca5bf97
......@@ -33,14 +33,6 @@ from zLOG import LOG, WARNING, ERROR
from ZODB.POSException import ConflictError
from cStringIO import StringIO
import transaction
# Error values for message validation
EXCEPTION = -1
VALID = 0
INVALID_PATH = 1
INVALID_ORDER = 2
# Time global parameters
MAX_PROCESSING_TIME = 900 # in seconds
VALIDATION_ERROR_DELAY = 15 # in seconds
......@@ -96,52 +88,6 @@ class Queue(object):
def distribute(self, activity_tool, node_count):
raise NotImplementedError
def validate(self, activity_tool, message, check_order_validation=1, **kw):
"""
This is the place where activity semantics is implemented
**kw contains all parameters which allow to implement synchronisation,
constraints, delays, etc.
Standard synchronisation parameters:
after_method_id -- never validate message if after_method_id
is in the list of methods which are
going to be executed
after_message_uid -- never validate message if after_message_uid
is in the list of messages which are
going to be executed
after_path -- never validate message if after_path
is in the list of path which are
going to be executed
"""
try:
if activity_tool.unrestrictedTraverse(message.object_path, None) is None:
# Do not try to call methods on objects which do not exist
LOG('CMFActivity', WARNING,
'Object %s does not exist' % '/'.join(message.object_path))
return INVALID_PATH
if check_order_validation:
for k, v in kw.iteritems():
if activity_tool.validateOrder(message, k, v):
return INVALID_ORDER
except ConflictError:
raise
except:
LOG('CMFActivity', WARNING,
'Validation of Object %s raised exception' % '/'.join(message.object_path),
error=sys.exc_info())
# Do not try to call methods on objects which cause errors
return EXCEPTION
return VALID
def getDependentMessageList(self, activity_tool, message):
message_list = []
for k, v in message.activity_kw.iteritems():
message_list += activity_tool.getDependentMessageList(message, k, v)
return message_list
def getExecutableMessageList(self, activity_tool, message, message_dict,
validation_text_dict, now_date=None):
"""Get messages which have no dependent message, and store them in the dictionary.
......@@ -165,8 +111,7 @@ class Queue(object):
cached_result = validation_text_dict.get(message.order_validation_text)
if cached_result is None:
message_list = self.getDependentMessageList(activity_tool, message)
transaction.commit() # Release locks.
message_list = activity_tool.getDependentMessageList(message, self)
if message_list:
# The result is not empty, so this message is not executable.
validation_text_dict[message.order_validation_text] = 0
......@@ -189,9 +134,6 @@ class Queue(object):
elif cached_result:
message_dict[message.uid] = message
def hasActivity(self, activity_tool, object, processing_node=None, active_process=None, **kw):
return 0
def flush(self, activity_tool, object, **kw):
pass
......@@ -201,7 +143,7 @@ class Queue(object):
key_list = message.activity_kw.keys()
key_list.sort()
for key in key_list:
method_id = "_validate_%s" % key
method_id = "_validate_" + key
if getattr(self, method_id, None) is not None:
order_validation_item_list.append((key, message.activity_kw[key]))
if len(order_validation_item_list) == 0:
......@@ -216,14 +158,6 @@ class Queue(object):
def getMessageList(self, activity_tool, processing_node=None,**kw):
return []
def countMessage(self, activity_tool,**kw):
return 0
def countMessageWithTag(self, activity_tool,value):
"""Return the number of messages which match the given tag.
"""
return self.countMessage(activity_tool, tag=value)
# Transaction Management
def prepareQueueMessageList(self, activity_tool, message_list):
# Called to prepare transaction commit for queued messages
......
......@@ -33,22 +33,19 @@ import MySQLdb
from MySQLdb.constants.ER import DUP_ENTRY
from DateTime import DateTime
from Shared.DC.ZRDB.Results import Results
from Shared.DC.ZRDB.DA import DatabaseError
from zLOG import LOG, TRACE, INFO, WARNING, ERROR, PANIC
from ZODB.POSException import ConflictError
from Products.CMFActivity.ActivityTool import (
Message, MESSAGE_NOT_EXECUTED, MESSAGE_EXECUTED, SkippedMessage)
from Products.CMFActivity.ActivityRuntimeEnvironment import (
DEFAULT_MAX_RETRY, ActivityRuntimeEnvironment)
from Queue import Queue, VALIDATION_ERROR_DELAY, VALID, INVALID_PATH
from Queue import Queue, VALIDATION_ERROR_DELAY
from Products.CMFActivity.Errors import ActivityFlushError
# Stop validating more messages when this limit is reached
MAX_VALIDATED_LIMIT = 1000
# Read this many messages to validate.
READ_MESSAGE_LIMIT = 1000
# TODO: Limit by size in bytes instead of number of rows.
MAX_MESSAGE_LIST_SIZE = 100
INVOKE_ERROR_STATE = -2
# Activity uids are stored as 64 bits unsigned integers.
# No need to depend on a database that supports unsigned integers.
......@@ -70,13 +67,14 @@ def sort_message_key(message):
_DequeueMessageException = Exception()
def render_datetime(x):
return "%.4d-%.2d-%.2d %.2d:%.2d:%09.6f" % x.toZone('UTC').parts()[:6]
# sqltest_dict ({'condition_name': <render_function>}) defines how to render
# condition statements in the SQL query used by SQLBase.getMessageList
def sqltest_dict():
sqltest_dict = {}
no_quote_type = int, float, long
def render_datetime(x):
return "%.4d-%.2d-%.2d %.2d:%.2d:%09.6f" % x.toZone('UTC').parts()[:6]
def _(name, column=None, op="="):
if column is None:
column = name
......@@ -102,7 +100,6 @@ def sqltest_dict():
_('group_method_id')
_('method_id')
_('path')
_('processing')
_('processing_node')
_('serialization_tag')
_('tag')
......@@ -115,103 +112,143 @@ def sqltest_dict():
return sqltest_dict
sqltest_dict = sqltest_dict()
def getNow(db):
"""
Return the UTC date from the point of view of the SQL server.
Note that this value is not cached, and is not transactional on MySQL
side.
"""
return db.query("SELECT UTC_TIMESTAMP(6)", 0)[1][0][0]
class SQLBase(Queue):
"""
Define a set of common methods for SQL-based storage of activities.
"""
def createTableSQL(self):
return """\
CREATE TABLE %s (
`uid` BIGINT UNSIGNED NOT NULL,
`date` DATETIME(6) NOT NULL,
`path` VARCHAR(255) NOT NULL,
`active_process_uid` INT UNSIGNED NULL,
`method_id` VARCHAR(255) NOT NULL,
`processing_node` SMALLINT NOT NULL DEFAULT -1,
`priority` TINYINT NOT NULL DEFAULT 0,
`group_method_id` VARCHAR(255) NOT NULL DEFAULT '',
`tag` VARCHAR(255) NOT NULL,
`serialization_tag` VARCHAR(255) NOT NULL,
`retry` TINYINT UNSIGNED NOT NULL DEFAULT 0,
`message` LONGBLOB NOT NULL,
PRIMARY KEY (`uid`),
KEY `processing_node_priority_date` (`processing_node`, `priority`, `date`),
KEY `node_group_priority_date` (`processing_node`, `group_method_id`, `priority`, `date`),
KEY `serialization_tag_processing_node` (`serialization_tag`, `processing_node`),
KEY (`path`),
KEY (`active_process_uid`),
KEY (`method_id`),
KEY (`tag`)
) ENGINE=InnoDB""" % self.sql_table
def initialize(self, activity_tool, clear):
folder = activity_tool.getPortalObject().portal_skins.activity
try:
createMessageTable = folder.SQLBase_createMessageTable
except AttributeError:
return
db = activity_tool.getSQLConnection()
create = self.createTableSQL()
if clear:
folder.SQLBase_dropMessageTable(table=self.sql_table)
createMessageTable(table=self.sql_table)
db.query("DROP TABLE IF EXISTS " + self.sql_table)
db.query(create)
else:
src = createMessageTable._upgradeSchema(create_if_not_exists=1,
initialize=self._initialize,
table=self.sql_table)
src = db.upgradeSchema(create, create_if_not_exists=1,
initialize=self._initialize)
if src:
LOG('CMFActivity', INFO, "%r table upgraded\n%s"
% (self.sql_table, src))
self._insert_max_payload = (db.getMaxAllowedPacket()
+ len(self._insert_separator)
- len(self._insert_template % (self.sql_table, '')))
def _initialize(self, db, column_list):
LOG('CMFActivity', ERROR, "Non-empty %r table upgraded."
" The following added columns could not be initialized: %s"
% (self.sql_table, ", ".join(column_list)))
_insert_template = ("INSERT INTO %s (uid,"
" path, active_process_uid, date, method_id, processing_node,"
" priority, group_method_id, tag, serialization_tag,"
" message) VALUES\n(%s)")
_insert_separator = "),\n("
def prepareQueueMessageList(self, activity_tool, message_list):
registered_message_list = [m for m in message_list if m.is_registered]
portal = activity_tool.getPortalObject()
for i in xrange(0, len(registered_message_list), MAX_MESSAGE_LIST_SIZE):
message_list = registered_message_list[i:i+MAX_MESSAGE_LIST_SIZE]
path_list = ['/'.join(m.object_path) for m in message_list]
active_process_uid_list = [m.active_process_uid for m in message_list]
method_id_list = [m.method_id for m in message_list]
priority_list = [m.activity_kw.get('priority', 1) for m in message_list]
date_list = [m.activity_kw.get('at_date') for m in message_list]
group_method_id_list = [m.getGroupId() for m in message_list]
tag_list = [m.activity_kw.get('tag', '') for m in message_list]
serialization_tag_list = [m.activity_kw.get('serialization_tag', '')
for m in message_list]
processing_node_list = []
for m in message_list:
m.order_validation_text = x = self.getOrderValidationText(m)
processing_node_list.append(0 if x == 'none' else -1)
db = activity_tool.getSQLConnection()
quote = db.string_literal
def insert(reset_uid):
values = self._insert_separator.join(values_list)
del values_list[:]
for _ in xrange(UID_ALLOCATION_TRY_COUNT):
if reset_uid:
reset_uid = False
# Overflow will result into IntegrityError.
db.query("SET @uid := %s" % getrandbits(UID_SAFE_BITSIZE))
try:
portal.SQLBase_writeMessageList(
table=self.sql_table,
uid_list=[
getrandbits(UID_SAFE_BITSIZE)
for _ in xrange(len(message_list))
],
path_list=path_list,
active_process_uid_list=active_process_uid_list,
method_id_list=method_id_list,
priority_list=priority_list,
message_list=map(Message.dump, message_list),
group_method_id_list=group_method_id_list,
date_list=date_list,
tag_list=tag_list,
processing_node_list=processing_node_list,
serialization_tag_list=serialization_tag_list)
db.query(self._insert_template % (self.sql_table, values))
except MySQLdb.IntegrityError, (code, _):
if code != DUP_ENTRY:
raise
reset_uid = True
else:
break
else:
raise ValueError("Maximum retry for SQLBase_writeMessageList reached")
def getNow(self, context):
"""
Return the current value for SQL server's NOW().
Note that this value is not cached, and is not transactional on MySQL
side.
"""
result = context.SQLBase_getNow()
assert len(result) == 1
assert len(result[0]) == 1
return result[0][0]
raise RuntimeError("Maximum retry for prepareQueueMessageList reached")
i = 0
reset_uid = True
values_list = []
max_payload = self._insert_max_payload
sep_len = len(self._insert_separator)
for m in message_list:
if m.is_registered:
active_process_uid = m.active_process_uid
order_validation_text = m.order_validation_text = \
self.getOrderValidationText(m)
date = m.activity_kw.get('at_date')
row = ','.join((
'@uid+%s' % i,
quote('/'.join(m.object_path)),
'NULL' if active_process_uid is None else str(active_process_uid),
"UTC_TIMESTAMP(6)" if date is None else quote(render_datetime(date)),
quote(m.method_id),
'0' if order_validation_text == 'none' else '-1',
str(m.activity_kw.get('priority', 1)),
quote(m.getGroupId()),
quote(m.activity_kw.get('tag', '')),
quote(m.activity_kw.get('serialization_tag', '')),
quote(Message.dump(m))))
i += 1
n = sep_len + len(row)
max_payload -= n
if max_payload < 0:
if values_list:
insert(reset_uid)
reset_uid = False
max_payload = self._insert_max_payload - n
else:
raise ValueError("max_allowed_packet too small to insert message")
values_list.append(row)
if values_list:
insert(reset_uid)
def _getMessageList(self, activity_tool, count=1000, src__=0, **kw):
def _getMessageList(self, db, count=1000, src__=0, **kw):
# XXX: Because most columns have NOT NULL constraint, conditions with None
# value should be ignored, instead of trying to render them
# (with comparisons with NULL).
sql_connection = activity_tool.getPortalObject().cmf_activity_sql_connection
q = sql_connection.sql_quote__
q = db.string_literal
sql = '\n AND '.join(sqltest_dict[k](v, q) for k, v in kw.iteritems())
sql = "SELECT * FROM %s%s\nORDER BY priority, date, uid%s" % (
self.sql_table,
sql and '\nWHERE ' + sql,
'' if count is None else '\nLIMIT %d' % count,
)
return sql if src__ else Results(sql_connection().query(sql, max_rows=0))
return sql if src__ else Results(db.query(sql, max_rows=0))
def getMessageList(self, *args, **kw):
result = self._getMessageList(*args, **kw)
def getMessageList(self, activity_tool, *args, **kw):
result = self._getMessageList(activity_tool.getSQLConnection(), *args, **kw)
if type(result) is str: # src__ == 1
return result,
class_name = self.__class__.__name__
......@@ -219,61 +256,30 @@ class SQLBase(Queue):
activity=class_name,
uid=line.uid,
processing_node=line.processing_node,
retry=line.retry,
processing=line.processing)
retry=line.retry)
for line in result]
def countMessage(self, activity_tool, tag=None, path=None,
method_id=None, message_uid=None, **kw):
"""Return the number of messages which match the given parameters.
"""
if isinstance(tag, str):
tag = [tag]
if isinstance(path, str):
path = [path]
if isinstance(method_id, str):
method_id = [method_id]
result = activity_tool.SQLBase_validateMessageList(table=self.sql_table,
method_id=method_id,
path=path,
message_uid=message_uid,
tag=tag,
serialization_tag=None,
count=1)
return result[0].uid_count
def hasActivity(self, activity_tool, object, method_id=None, only_valid=None,
active_process_uid=None,
only_invalid=False):
hasMessage = getattr(activity_tool, 'SQLBase_hasMessage', None)
if hasMessage is not None:
if object is None:
path = None
else:
path = '/'.join(object.getPhysicalPath())
try:
result = hasMessage(table=self.sql_table, path=path, method_id=method_id,
only_valid=only_valid, active_process_uid=active_process_uid,
only_invalid=only_invalid)
except DatabaseError:
LOG(
'SQLBase',
ERROR,
'%r raised, considering there are no activities' % (
hasMessage,
),
error=True,
)
else:
return result[0].message_count > 0
return 0
def countMessageSQL(self, quote, **kw):
return "SELECT count(*) FROM %s WHERE processing_node > -10 AND %s" % (
self.sql_table, " AND ".join(
sqltest_dict[k](v, quote) for (k, v) in kw.iteritems() if v
) or "1")
def hasActivitySQL(self, quote, only_valid=False, only_invalid=False, **kw):
where = [sqltest_dict[k](v, quote) for (k, v) in kw.iteritems() if v]
if only_valid:
where.append('processing_node > -2')
if only_invalid:
where.append('processing_node < -1')
return "SELECT 1 FROM %s WHERE %s LIMIT 1" % (
self.sql_table, " AND ".join(where) or "1")
def getPriority(self, activity_tool):
result = activity_tool.SQLBase_getPriority(table=self.sql_table)
if result:
result, = result
return result['priority'], result['date']
return Queue.getPriority(self, activity_tool)
result = activity_tool.getSQLConnection().query(
"SELECT priority, date FROM %s"
" WHERE processing_node=0 AND date <= UTC_TIMESTAMP(6)"
" ORDER BY priority, date LIMIT 1" % self.sql_table, 0)[1]
return result[0] if result else Queue.getPriority(self, activity_tool)
def _retryOnLockError(self, method, args=(), kw={}):
while True:
......@@ -285,74 +291,61 @@ class SQLBase(Queue):
LOG('SQLBase', INFO, 'Got a lock error, retrying...')
# Validation private methods
def _validate(self, activity_tool, method_id=None, message_uid=None, path=None, tag=None,
serialization_tag=None):
if isinstance(method_id, str):
method_id = [method_id]
if isinstance(path, str):
path = [path]
if isinstance(tag, str):
tag = [tag]
if method_id or message_uid or path or tag or serialization_tag:
result = activity_tool.SQLBase_validateMessageList(table=self.sql_table,
method_id=method_id,
message_uid=message_uid,
path=path,
tag=tag,
count=False,
serialization_tag=serialization_tag)
message_list = []
for line in result:
m = Message.load(line.message,
line=line,
uid=line.uid,
date=line.date,
processing_node=line.processing_node)
if not hasattr(m, 'order_validation_text'): # BBB
m.order_validation_text = self.getOrderValidationText(m)
message_list.append(m)
return message_list
def _validate_after_method_id(self, activity_tool, message, value):
return self._validate(activity_tool, method_id=value)
def _validate_after_path(self, activity_tool, message, value):
return self._validate(activity_tool, path=value)
def _validate_after_message_uid(self, activity_tool, message, value):
return self._validate(activity_tool, message_uid=value)
def _validate_after_path_and_method_id(self, activity_tool, message, value):
if not (isinstance(value, (tuple, list)) and len(value) == 2):
LOG('CMFActivity', WARNING,
'unable to recognize value for after_path_and_method_id: %r' % (value,))
return []
return self._validate(activity_tool, path=value[0], method_id=value[1])
def _validate_after_tag(self, activity_tool, message, value):
return self._validate(activity_tool, tag=value)
def _validate_after_tag_and_method_id(self, activity_tool, message, value):
# Count number of occurrences of tag and method_id
if not (isinstance(value, (tuple, list)) and len(value) == 2):
LOG('CMFActivity', WARNING,
'unable to recognize value for after_tag_and_method_id: %r' % (value,))
return []
return self._validate(activity_tool, tag=value[0], method_id=value[1])
def _validate_serialization_tag(self, activity_tool, message, value):
return self._validate(activity_tool, serialization_tag=value)
def getValidationSQL(self, quote, activate_kw, same_queue):
validate_list = []
for k, v in activate_kw.iteritems():
if v is not None:
try:
method = getattr(self, '_validate_' + k, None)
if method:
validate_list.append(' AND '.join(method(v, quote)))
except Exception:
LOG('CMFActivity', WARNING, 'invalid %s value: %r' % (k, v),
error=True)
# Prevent validation by depending on anything, at least itself.
validate_list = '1',
same_queue = False
break
if validate_list:
return ("SELECT '%s' as activity, uid, date, processing_node,"
" priority, group_method_id, message FROM %s"
" WHERE processing_node > -10 AND (%s) LIMIT %s" % (
type(self).__name__, self.sql_table,
' OR '.join(validate_list),
READ_MESSAGE_LIMIT if same_queue else 1))
def _validate_after_method_id(self, *args):
return sqltest_dict['method_id'](*args),
def _validate_after_path(self, *args):
return sqltest_dict['path'](*args),
def _validate_after_message_uid(self, *args):
return sqltest_dict['uid'](*args),
def _validate_after_path_and_method_id(self, value, quote):
path, method_id = value
return (sqltest_dict['method_id'](method_id, quote),
sqltest_dict['path'](path, quote))
def _validate_after_tag(self, *args):
return sqltest_dict['tag'](*args),
def _validate_after_tag_and_method_id(self, value, quote):
tag, method_id = value
return (sqltest_dict['method_id'](method_id, quote),
sqltest_dict['tag'](tag, quote))
def _validate_serialization_tag(self, *args):
return 'processing_node > -1', sqltest_dict['serialization_tag'](*args)
def _log(self, severity, summary):
LOG(self.__class__.__name__, severity, summary,
error=severity>INFO and sys.exc_info() or None)
def distribute(self, activity_tool, node_count):
assignMessage = getattr(activity_tool, 'SQLBase_assignMessage', None)
if assignMessage is None:
return
now_date = self.getNow(activity_tool)
db = activity_tool.getSQLConnection()
now_date = getNow(db)
where_kw = {
'processing_node': -1,
'to_date': now_date,
......@@ -360,7 +353,7 @@ class SQLBase(Queue):
}
validated_count = 0
while 1:
result = self._getMessageList(activity_tool, **where_kw)
result = self._getMessageList(db, **where_kw)
if not result:
return
transaction.commit()
......@@ -373,6 +366,7 @@ class SQLBase(Queue):
message.order_validation_text = self.getOrderValidationText(message)
self.getExecutableMessageList(activity_tool, message, message_dict,
validation_text_dict, now_date=now_date)
transaction.commit()
if message_dict:
distributable_uid_set = set()
serialization_tag_dict = {}
......@@ -395,8 +389,7 @@ class SQLBase(Queue):
distributable_uid_set.add(message.uid)
distributable_count = len(distributable_uid_set)
if distributable_count:
assignMessage(table=self.sql_table,
processing_node=0, uid=tuple(distributable_uid_set))
self.assignMessageList(db, 0, distributable_uid_set)
validated_count += distributable_count
if validated_count >= MAX_VALIDATED_LIMIT:
return
......@@ -404,60 +397,47 @@ class SQLBase(Queue):
where_kw['from_date'] = line.date
where_kw['above_uid'] = line.uid
def getReservedMessageList(self, activity_tool, date, processing_node,
limit=None, group_method_id=None):
def getReservedMessageList(self, db, date, processing_node, limit,
group_method_id=None):
"""
Get and reserve a list of messages.
limit
Maximum number of messages to fetch.
This number is not guaranteed to be reached, because of:
- not enough messages being pending execution
- race condition (other nodes reserving the same messages at the same
time)
This number is guaranteed not to be exceeded.
If None (or not given) no limit apply.
This number is not guaranteed to be reached, because of not enough
messages being pending execution.
"""
assert limit
# Do not check already-assigned messages when trying to reserve more
# activities, because in such case we will find one reserved activity.
result = activity_tool.SQLBase_selectReservedMessageList(
table=self.sql_table,
count=limit,
processing_node=processing_node,
group_method_id=group_method_id,
)
limit -= len(result)
if limit:
reservable = activity_tool.SQLBase_getReservableMessageList(
table=self.sql_table,
count=limit,
processing_node=processing_node,
to_date=date,
group_method_id=group_method_id,
)
if reservable:
activity_tool.SQLBase_reserveMessageList(
uid=[x.uid for x in reservable],
table=self.sql_table,
processing_node=processing_node,
)
# DC.ZRDB.Results.Results does not implement concatenation
# Implement an imperfect (but cheap) concatenation. Do not update
# __items__ nor _data_dictionary.
assert result._names == reservable._names, (result._names,
reservable._names)
result._data += reservable._data
return result
def makeMessageListAvailable(self, activity_tool, uid_list):
quote = db.string_literal
query = db.query
args = (self.sql_table, sqltest_dict['to_date'](date, quote),
' AND group_method_id=' + quote(group_method_id)
if group_method_id else '' , limit)
# Get reservable messages.
# During normal operation, sorting by date (as last criteria) is fairer
# for users and reduce the probability to do the same work several times
# (think of an object that is modified several times in a short period of
# time).
if 1:
result = Results(query(
"SELECT * FROM %s WHERE processing_node=0 AND %s%s"
" ORDER BY priority, date LIMIT %s FOR UPDATE" % args, 0))
if result:
# Reserve messages.
uid_list = [x.uid for x in result]
self.assignMessageList(db, processing_node, uid_list)
self._log(TRACE, 'Reserved messages: %r' % uid_list)
return result
return ()
def assignMessageList(self, db, state, uid_list):
"""
Put messages back in processing_node=0 .
Put messages back in given processing_node.
"""
if len(uid_list):
activity_tool.SQLBase_makeMessageListAvailable(table=self.sql_table,
uid=uid_list)
db.query("UPDATE %s SET processing_node=%s WHERE uid IN (%s)\0COMMIT" % (
self.sql_table, state, ','.join(map(str, uid_list))))
def getProcessableMessageLoader(self, activity_tool, processing_node):
def getProcessableMessageLoader(self, db, processing_node):
# do not merge anything
def load(line):
uid = line.uid
......@@ -472,14 +452,12 @@ class SQLBase(Queue):
reserved (definitely lost, but they are expandable since redundant).
- reserve a message
- set reserved message to processing=1 state
- if this message has a group_method_id:
- reserve a bunch of messages
- until the total "cost" of the group goes over 1
- get one message from the reserved bunch (this messages will be
"needed")
- update the total cost
- set "needed" reserved messages to processing=1 state
- unreserve "unneeded" messages
- return still-reserved message list and a group_method_id
......@@ -494,21 +472,28 @@ class SQLBase(Queue):
- group_method_id
- uid_to_duplicate_uid_list_dict
"""
def getReservedMessageList(limit, group_method_id=None):
line_list = self.getReservedMessageList(activity_tool=activity_tool,
date=now_date,
processing_node=processing_node,
limit=limit,
group_method_id=group_method_id)
if line_list:
self._log(TRACE, 'Reserved messages: %r' % [x.uid for x in line_list])
return line_list
now_date = self.getNow(activity_tool)
db = activity_tool.getSQLConnection()
now_date = getNow(db)
uid_to_duplicate_uid_list_dict = {}
try:
result = getReservedMessageList(1)
if result:
load = self.getProcessableMessageLoader(activity_tool, processing_node)
while 1: # not a loop
# Select messages that were either assigned manually or left
# unprocessed after a shutdown. Most of the time, there's none.
# To minimize the probability of deadlocks, we also COMMIT so that a
# new transaction starts on the first 'FOR UPDATE' query, which is all
# the more important as the current on started with getPriority().
result = db.query("SELECT * FROM %s WHERE processing_node=%s"
" ORDER BY priority, date LIMIT 1\0COMMIT" % (
self.sql_table, processing_node), 0)
already_assigned = result[1]
if already_assigned:
result = Results(result)
else:
result = self.getReservedMessageList(db, now_date, processing_node,
1)
if not result:
break
load = self.getProcessableMessageLoader(db, processing_node)
m, uid, uid_list = load(result[0])
message_list = [m]
uid_to_duplicate_uid_list_dict[uid] = uid_list
......@@ -524,7 +509,17 @@ class SQLBase(Queue):
if limit > 1: # <=> cost * count < 1
cost *= count
# Retrieve objects which have the same group method.
result = iter(getReservedMessageList(limit, group_method_id))
result = iter(already_assigned
and Results(db.query("SELECT * FROM %s"
" WHERE processing_node=%s AND group_method_id=%s"
" ORDER BY priority, date LIMIT %s" % (
self.sql_table, processing_node,
db.string_literal(group_method_id), limit), 0))
# Do not optimize rare case: keep the code simple by not
# adding more results from getReservedMessageList if the
# limit is not reached.
or self.getReservedMessageList(db, now_date, processing_node,
limit, group_method_id))
for line in result:
if line.uid in uid_to_duplicate_uid_list_dict:
continue
......@@ -538,10 +533,9 @@ class SQLBase(Queue):
message_list.append(m)
if cost >= 1:
# Unreserve extra messages as soon as possible.
self.makeMessageListAvailable(activity_tool=activity_tool,
uid_list=[line.uid for line in result if line.uid != uid])
activity_tool.SQLBase_processMessage(table=self.sql_table,
uid=uid_to_duplicate_uid_list_dict.keys())
uid_list = [line.uid for line in result if line.uid != uid]
if uid_list:
self.assignMessageList(db, 0, uid_list)
return message_list, group_method_id, uid_to_duplicate_uid_list_dict
except:
self._log(WARNING, 'Exception while reserving messages.')
......@@ -550,8 +544,7 @@ class SQLBase(Queue):
for uid_list in uid_to_duplicate_uid_list_dict.itervalues():
to_free_uid_list += uid_list
try:
self.makeMessageListAvailable(activity_tool=activity_tool,
uid_list=to_free_uid_list)
self.assignMessageList(db, 0, to_free_uid_list)
except:
self._log(ERROR, 'Failed to free messages: %r' % to_free_uid_list)
else:
......@@ -559,7 +552,7 @@ class SQLBase(Queue):
self._log(TRACE, 'Freed messages %r' % to_free_uid_list)
else:
self._log(TRACE, '(no message was reserved)')
return [], None, uid_to_duplicate_uid_list_dict
return (), None, None
def _abort(self):
try:
......@@ -636,6 +629,18 @@ class SQLBase(Queue):
transaction.commit()
return not message_list
def deleteMessageList(self, db, uid_list):
db.query("DELETE FROM %s WHERE uid IN (%s)" % (
self.sql_table, ','.join(map(str, uid_list))))
def reactivateMessageList(self, db, uid_list, delay, retry):
db.query("UPDATE %s SET"
" date = DATE_ADD(UTC_TIMESTAMP(6), INTERVAL %s SECOND)"
"%s WHERE uid IN (%s)" % (
self.sql_table, delay,
", priority = priority + 1, retry = retry + 1" if retry else "",
",".join(map(str, uid_list))))
def finalizeMessageExecution(self, activity_tool, message_list,
uid_to_duplicate_uid_list_dict=None):
"""
......@@ -648,6 +653,7 @@ class SQLBase(Queue):
be put in a permanent-error state.
- In all other cases, retry count is increased and message is delayed.
"""
db = activity_tool.getSQLConnection()
deletable_uid_list = []
delay_uid_list = []
final_error_uid_list = []
......@@ -692,10 +698,7 @@ class SQLBase(Queue):
delay = VALIDATION_ERROR_DELAY * (retry * retry + 1) * 2
try:
# Immediately update, because values different for every message
activity_tool.SQLBase_reactivate(table=self.sql_table,
uid=[uid],
delay=delay,
retry=1)
self.reactivateMessageList(db, (uid,), delay, True)
except:
self._log(WARNING, 'Failed to reactivate %r' % uid)
make_available_uid_list.append(uid)
......@@ -709,9 +712,7 @@ class SQLBase(Queue):
deletable_uid_list.append(uid)
if deletable_uid_list:
try:
self._retryOnLockError(activity_tool.SQLBase_delMessage,
kw={'table': self.sql_table,
'uid': deletable_uid_list})
self._retryOnLockError(self.deleteMessageList, (db, deletable_uid_list))
except:
self._log(ERROR, 'Failed to delete messages %r' % deletable_uid_list)
else:
......@@ -719,21 +720,19 @@ class SQLBase(Queue):
if delay_uid_list:
try:
# If this is a conflict error, do not increase 'retry' but only delay.
activity_tool.SQLBase_reactivate(table=self.sql_table,
uid=delay_uid_list, delay=VALIDATION_ERROR_DELAY, retry=None)
self.reactivateMessageList(db, delay_uid_list,
VALIDATION_ERROR_DELAY, False)
except:
self._log(ERROR, 'Failed to delay %r' % delay_uid_list)
if final_error_uid_list:
try:
activity_tool.SQLBase_assignMessage(table=self.sql_table,
uid=final_error_uid_list, processing_node=INVOKE_ERROR_STATE)
self.assignMessageList(db, INVOKE_ERROR_STATE, final_error_uid_list)
except:
self._log(ERROR, 'Failed to set message to error state for %r'
% final_error_uid_list)
if make_available_uid_list:
try:
self.makeMessageListAvailable(activity_tool=activity_tool,
uid_list=make_available_uid_list)
self.assignMessageList(db, 0, make_available_uid_list)
except:
self._log(ERROR, 'Failed to unreserve %r' % make_available_uid_list)
else:
......@@ -762,17 +761,14 @@ class SQLBase(Queue):
except AttributeError:
pass
line = getattr(message, 'line', None)
validate_value = VALID if line and line.processing_node != -1 else \
message.validate(self, activity_tool)
if validate_value == VALID:
if (line and line.processing_node != -1 or
not activity_tool.getDependentMessageList(message)):
# Try to invoke the message - what happens if invoke calls flushActivity ??
with ActivityRuntimeEnvironment(message):
activity_tool.invoke(message)
if message.getExecutionState() != MESSAGE_EXECUTED:
raise ActivityFlushError('Could not invoke %s on %s'
% (message.method_id, path))
elif validate_value is INVALID_PATH:
raise ActivityFlushError('The document %s does not exist' % path)
else:
raise ActivityFlushError('Could not validate %s on %s'
% (message.method_id, path))
......@@ -783,13 +779,14 @@ class SQLBase(Queue):
invoke(m)
activity_tool.unregisterMessage(self, m)
uid_list = []
for line in self._getMessageList(activity_tool, path=path, processing=0,
db = activity_tool.getSQLConnection()
for line in self._getMessageList(db, path=path,
**({'method_id': method_id} if method_id else {})):
uid_list.append(line.uid)
if invoke:
if invoke and line.processing_node <= 0:
invoke(Message.load(line.message, uid=line.uid, line=line))
if uid_list:
activity_tool.SQLBase_delMessage(table=self.sql_table, uid=uid_list)
self.deleteMessageList(db, uid_list)
# Required for tests
def timeShift(self, activity_tool, delay, processing_node=None):
......@@ -797,5 +794,8 @@ class SQLBase(Queue):
To simulate time shift, we simply subtract delay from
all dates in message(_queue) table
"""
activity_tool.SQLBase_timeShift(table=self.sql_table, delay=delay,
processing_node=processing_node)
activity_tool.getSQLConnection().query("UPDATE %s SET"
" date = DATE_SUB(date, INTERVAL %s SECOND)"
% (self.sql_table, delay)
+ ('' if processing_node is None else
"WHERE processing_node=%s" % processing_node))
......@@ -26,6 +26,7 @@
#
##############################################################################
from Shared.DC.ZRDB.Results import Results
from Products.CMFActivity.ActivityTool import Message
import sys
#from time import time
......@@ -74,8 +75,9 @@ class SQLDict(SQLBase):
message_list = activity_buffer.getMessageList(self)
return [m for m in message_list if m.is_registered]
def getProcessableMessageLoader(self, activity_tool, processing_node):
def getProcessableMessageLoader(self, db, processing_node):
path_and_method_id_dict = {}
quote = db.string_literal
def load(line):
# getProcessableMessageList already fetch messages with the same
# group_method_id, so what remains to be filtered on are path and
......@@ -87,6 +89,8 @@ class SQLDict(SQLBase):
uid = line.uid
original_uid = path_and_method_id_dict.get(key)
if original_uid is None:
sql_method_id = " AND method_id = %s AND group_method_id = %s" % (
quote(method_id), quote(line.group_method_id))
m = Message.load(line.message, uid=uid, line=line)
merge_parent = m.activity_kw.get('merge_parent')
try:
......@@ -101,11 +105,14 @@ class SQLDict(SQLBase):
path_list.append(path)
uid_list = []
if path_list:
result = activity_tool.SQLDict_selectParentMessage(
path=path_list,
method_id=method_id,
group_method_id=line.group_method_id,
processing_node=processing_node)
# Select parent messages.
result = Results(db.query("SELECT * FROM message"
" WHERE processing_node IN (0, %s) AND path IN (%s)%s"
" ORDER BY path LIMIT 1 FOR UPDATE" % (
processing_node,
','.join(map(quote, path_list)),
sql_method_id,
), 0))
if result: # found a parent
# mark child as duplicate
uid_list.append(uid)
......@@ -115,29 +122,32 @@ class SQLDict(SQLBase):
uid = line.uid
m = Message.load(line.message, uid=uid, line=line)
# return unreserved similar children
result = activity_tool.SQLDict_selectChildMessageList(
path=line.path,
method_id=method_id,
group_method_id=line.group_method_id)
reserve_uid_list = [x.uid for x in result]
path = line.path
result = db.query("SELECT uid FROM message"
" WHERE processing_node = 0 AND (path = %s OR path LIKE %s)"
"%s FOR UPDATE" % (
quote(path), quote(path.replace('_', r'\_') + '/%'),
sql_method_id,
), 0)[1]
reserve_uid_list = [x for x, in result]
uid_list += reserve_uid_list
if not line.processing_node:
# reserve found parent
reserve_uid_list.append(uid)
else:
result = activity_tool.SQLDict_selectDuplicatedLineList(
path=path,
method_id=method_id,
group_method_id=line.group_method_id)
reserve_uid_list = uid_list = [x.uid for x in result]
# Select duplicates.
result = db.query("SELECT uid FROM message"
" WHERE processing_node = 0 AND path = %s%s FOR UPDATE" % (
quote(path), sql_method_id,
), 0)[1]
reserve_uid_list = uid_list = [x for x, in result]
if reserve_uid_list:
activity_tool.SQLDict_reserveDuplicatedLineList(
processing_node=processing_node, uid=reserve_uid_list)
self.assignMessageList(db, processing_node, reserve_uid_list)
else:
activity_tool.SQLDict_commit() # release locks
db.query("COMMIT") # XXX: useful ?
except:
self._log(WARNING, 'getDuplicateMessageUidList got an exception')
activity_tool.SQLDict_rollback() # release locks
self._log(WARNING, 'Failed to reserve duplicates')
db.query("ROLLBACK")
raise
if uid_list:
self._log(TRACE, 'Reserved duplicate messages: %r' % uid_list)
......
......@@ -31,7 +31,7 @@ from zLOG import LOG, TRACE, INFO, WARNING, ERROR, PANIC
import MySQLdb
from MySQLdb.constants.ER import DUP_ENTRY
from SQLBase import (
SQLBase, sort_message_key, MAX_MESSAGE_LIST_SIZE,
SQLBase, sort_message_key,
UID_SAFE_BITSIZE, UID_ALLOCATION_TRY_COUNT,
)
from Products.CMFActivity.ActivityTool import Message
......@@ -45,77 +45,103 @@ class SQLJoblib(SQLDict):
sql_table = 'message_job'
uid_group = 'portal_activity_job'
  def initialize(self, activity_tool, clear):
    """
    Create or upgrade the queue table (InnoDB, see createTableSQL) via the
    SQLJoblib_createMessageTable ZSQL method.
    """
    # The ZSQL method lives in the 'activity' skin folder; if it is not
    # installed there is nothing to initialize.
    folder = activity_tool.getPortalObject().portal_skins.activity
    try:
      createMessageTable = folder.SQLJoblib_createMessageTable
    except AttributeError:
      return
    if clear:
      # Drop any existing table and recreate it from scratch.
      folder.SQLBase_dropMessageTable(table=self.sql_table)
      createMessageTable()
    else:
      # Create the table if missing, or alter it in place so its schema
      # matches the one expected by this version of the code.
      src = createMessageTable._upgradeSchema(create_if_not_exists=1,
                                              initialize=self._initialize,
                                              table=self.sql_table)
      if src:
        # Log the DDL that was applied, for post-mortem inspection.
        LOG('CMFActivity', INFO, "%r table upgraded\n%s"
            % (self.sql_table, src))
  def createTableSQL(self):
    """Return the CREATE TABLE statement for this queue's table.

    Compared to the base schema, rows additionally carry a `signature`
    column (BINARY(16)) — presumably a hash used to detect duplicate
    joblib messages (see generateMessageUID) — TODO confirm against the
    duplicate-selection query.
    """
    return """\
CREATE TABLE %s (
  `uid` BIGINT UNSIGNED NOT NULL,
  `date` DATETIME(6) NOT NULL,
  `path` VARCHAR(255) NOT NULL,
  `active_process_uid` INT UNSIGNED NULL,
  `method_id` VARCHAR(255) NOT NULL,
  `processing_node` SMALLINT NOT NULL DEFAULT -1,
  `priority` TINYINT NOT NULL DEFAULT 0,
  `group_method_id` VARCHAR(255) NOT NULL DEFAULT '',
  `tag` VARCHAR(255) NOT NULL,
  `signature` BINARY(16) NOT NULL,
  `serialization_tag` VARCHAR(255) NOT NULL,
  `retry` TINYINT UNSIGNED NOT NULL DEFAULT 0,
  `message` LONGBLOB NOT NULL,
  PRIMARY KEY (`uid`),
  KEY `processing_node_priority_date` (`processing_node`, `priority`, `date`),
  KEY `node_group_priority_date` (`processing_node`, `group_method_id`, `priority`, `date`),
  KEY `serialization_tag_processing_node` (`serialization_tag`, `processing_node`),
  KEY (`path`),
  KEY (`active_process_uid`),
  KEY (`method_id`),
  KEY (`tag`)
) ENGINE=InnoDB""" % self.sql_table
def generateMessageUID(self, m):
return (tuple(m.object_path), m.method_id, m.activity_kw.get('signature'),
m.activity_kw.get('tag'), m.activity_kw.get('group_id'))
_insert_template = ("INSERT INTO %s (uid,"
" path, active_process_uid, date, method_id, processing_node,"
" priority, group_method_id, tag, signature, serialization_tag,"
" message) VALUES\n(%s)")
def prepareQueueMessageList(self, activity_tool, message_list):
registered_message_list = [m for m in message_list if m.is_registered]
portal = activity_tool.getPortalObject()
for i in xrange(0, len(registered_message_list), MAX_MESSAGE_LIST_SIZE):
message_list = registered_message_list[i:i+MAX_MESSAGE_LIST_SIZE]
path_list = ['/'.join(m.object_path) for m in message_list]
active_process_uid_list = [m.active_process_uid for m in message_list]
method_id_list = [m.method_id for m in message_list]
priority_list = [m.activity_kw.get('priority', 1) for m in message_list]
date_list = [m.activity_kw.get('at_date') for m in message_list]
group_method_id_list = [m.getGroupId() for m in message_list]
tag_list = [m.activity_kw.get('tag', '') for m in message_list]
signature_list=[m.activity_kw.get('signature', '') for m in message_list]
serialization_tag_list = [m.activity_kw.get('serialization_tag', '')
for m in message_list]
processing_node_list = []
for m in message_list:
m.order_validation_text = x = self.getOrderValidationText(m)
processing_node_list.append(0 if x == 'none' else -1)
db = activity_tool.getSQLConnection()
quote = db.string_literal
def insert(reset_uid):
values = self._insert_separator.join(values_list)
del values_list[:]
for _ in xrange(UID_ALLOCATION_TRY_COUNT):
if reset_uid:
reset_uid = False
# Overflow will result into IntegrityError.
db.query("SET @uid := %s" % getrandbits(UID_SAFE_BITSIZE))
try:
portal.SQLJoblib_writeMessage(
uid_list=[
getrandbits(UID_SAFE_BITSIZE)
for _ in xrange(len(message_list))
],
path_list=path_list,
active_process_uid_list=active_process_uid_list,
method_id_list=method_id_list,
priority_list=priority_list,
message_list=map(Message.dump, message_list),
group_method_id_list=group_method_id_list,
date_list=date_list,
tag_list=tag_list,
processing_node_list=processing_node_list,
signature_list=signature_list,
serialization_tag_list=serialization_tag_list)
db.query(self._insert_template % (self.sql_table, values))
except MySQLdb.IntegrityError, (code, _):
if code != DUP_ENTRY:
raise
reset_uid = True
else:
break
else:
raise ValueError("Maximum retry for SQLBase_writeMessageList reached")
raise ValueError("Maximum retry for prepareQueueMessageList reached")
i = 0
reset_uid = True
values_list = []
max_payload = self._insert_max_payload
sep_len = len(self._insert_separator)
for m in message_list:
if m.is_registered:
active_process_uid = m.active_process_uid
order_validation_text = m.order_validation_text = \
self.getOrderValidationText(m)
date = m.activity_kw.get('at_date')
row = ','.join((
'@uid+%s' % i,
quote('/'.join(m.object_path)),
'NULL' if active_process_uid is None else str(active_process_uid),
"UTC_TIMESTAMP(6)" if date is None else quote(render_datetime(date)),
quote(m.method_id),
'0' if order_validation_text == 'none' else '-1',
str(m.activity_kw.get('priority', 1)),
quote(m.getGroupId()),
quote(m.activity_kw.get('tag', '')),
quote(m.activity_kw.get('signature', '')),
quote(m.activity_kw.get('serialization_tag', '')),
quote(Message.dump(m))))
i += 1
n = sep_len + len(row)
max_payload -= n
if max_payload < 0:
if values_list:
insert(reset_uid)
reset_uid = False
max_payload = self._insert_max_payload - n
else:
raise ValueError("max_allowed_packet too small to insert message")
values_list.append(row)
if values_list:
insert(reset_uid)
def getProcessableMessageLoader(self, activity_tool, processing_node):
def getProcessableMessageLoader(self, db, processing_node):
path_and_method_id_dict = {}
quote = db.string_literal
def load(line):
# getProcessableMessageList already fetch messages with the same
# group_method_id, so what remains to be filtered on are path, method_id
......@@ -128,19 +154,21 @@ class SQLJoblib(SQLDict):
if original_uid is None:
m = Message.load(line.message, uid=uid, line=line)
try:
result = activity_tool.SQLJoblib_selectDuplicatedLineList(
path=path,
method_id=method_id,
group_method_id=line.group_method_id,
signature=line.signature)
reserve_uid_list = uid_list = [x.uid for x in result]
if reserve_uid_list:
activity_tool.SQLBase_reserveMessageList(
table=self.sql_table,
processing_node=processing_node,
uid=reserve_uid_list)
# Select duplicates.
result = db.query("SELECT uid FROM message_job"
" WHERE processing_node = 0 AND path = %s AND signature = %s"
" AND method_id = %s AND group_method_id = %s FOR UPDATE" % (
quote(path), quote(line.signature),
quote(method_id), quote(line.group_method_id),
), 0)[1]
uid_list = [x for x, in result]
if uid_list:
self.assignMessageList(db, processing_node, uid_list)
else:
db.query("COMMIT") # XXX: useful ?
except:
self._log(WARNING, 'getDuplicateMessageUidList got an exception')
self._log(WARNING, 'Failed to reserve duplicates')
db.query("ROLLBACK")
raise
if uid_list:
self._log(TRACE, 'Reserved duplicate messages: %r' % uid_list)
......
......@@ -57,6 +57,7 @@ from Products.ERP5Type.UnrestrictedMethod import PrivilegedUser
from zope.site.hooks import setSite
import transaction
from App.config import getConfiguration
from Shared.DC.ZRDB.Results import Results
import Products.Localizer.patches
localizer_lock = Products.Localizer.patches._requests_lock
......@@ -191,7 +192,6 @@ class Message(BaseMessage):
call_traceback = None
exc_info = None
is_executed = MESSAGE_NOT_EXECUTED
processing = None
traceback = None
oid = None
is_registered = False
......@@ -367,11 +367,6 @@ class Message(BaseMessage):
except:
self.setExecutionState(MESSAGE_NOT_EXECUTED, context=activity_tool)
def validate(self, activity, activity_tool, check_order_validation=1):
return activity.validate(activity_tool, self,
check_order_validation=check_order_validation,
**self.activity_kw)
def notifyUser(self, activity_tool, retry=False):
"""Notify the user that the activity failed."""
portal = activity_tool.getPortalObject()
......@@ -655,11 +650,6 @@ class ActivityTool (BaseTool):
activity_timing_log = False
cancel_and_invoke_links_hidden = False
def SQLDict_setPriority(self, **kw):
real_SQLDict_setPriority = getattr(self.aq_parent, 'SQLDict_setPriority')
LOG('ActivityTool', 0, real_SQLDict_setPriority(src__=1, **kw))
return real_SQLDict_setPriority(**kw)
# Filter content (ZMI))
def filtered_meta_types(self, user=None):
# Filters the list of available meta types.
......@@ -670,6 +660,9 @@ class ActivityTool (BaseTool):
meta_types.append(meta_type)
return meta_types
  def getSQLConnection(self):
    # Return the raw DB object of the 'cmf_activity_sql_connection' DA,
    # acquired from the tool's container (aq_inner strips any extra
    # acquisition wrappers on self before walking up to the parent).
    return self.aq_inner.aq_parent.cmf_activity_sql_connection()
def maybeMigrateConnectionClass(self):
connection_id = 'cmf_activity_sql_connection'
sql_connection = getattr(self, connection_id, None)
......@@ -689,6 +682,20 @@ class ActivityTool (BaseTool):
self.maybeMigrateConnectionClass()
for activity in activity_dict.itervalues():
activity.initialize(self, clear=False)
# Remove old skin if any.
skins_tool = self.getPortalObject().portal_skins
name = 'activity'
if (getattr(skins_tool.get(name), '_dirpath', None)
== 'Products.CMFActivity:skins/activity'):
for selection, skins in skins_tool.getSkinPaths():
skins = skins.split(',')
try:
skins.remove(name)
except ValueError:
continue
skins_tool.manage_skinLayers(
add_skin=1, skinname=selection, skinpath=skins)
skins_tool._delObject(name)
def _callSafeFunction(self, batch_function):
return batch_function()
......@@ -1127,14 +1134,16 @@ class ActivityTool (BaseTool):
def hasActivity(self, *args, **kw):
# Check in each queue if the object has deferred tasks
    # if no argument is provided, then check on self
if len(args) > 0:
obj = args[0]
if args:
obj, = args
else:
obj = self
for activity in activity_dict.itervalues():
if activity.hasActivity(aq_inner(self), obj, **kw):
return True
return False
path = None if obj is None else '/'.join(obj.getPhysicalPath())
db = self.getSQLConnection()
quote = db.string_literal
return bool(db.query("(%s)" % ") UNION ALL (".join(
activity.hasActivitySQL(quote, path=path, **kw)
for activity in activity_dict.itervalues()))[1])
security.declarePrivate('getActivityBuffer')
def getActivityBuffer(self, create_if_not_found=True):
......@@ -1443,8 +1452,9 @@ class ActivityTool (BaseTool):
"""
if not(isinstance(message_uid_list, list)):
message_uid_list = [message_uid_list]
self.SQLBase_makeMessageListAvailable(table=activity_dict[activity].sql_table,
uid=message_uid_list)
if message_uid_list:
activity_dict[activity].unreserveMessageList(self.getSQLConnection(),
0, message_uid_list)
if REQUEST is not None:
return REQUEST.RESPONSE.redirect('%s/%s' % (
self.absolute_url(), 'view'))
......@@ -1470,8 +1480,8 @@ class ActivityTool (BaseTool):
"""
if not(isinstance(message_uid_list, list)):
message_uid_list = [message_uid_list]
self.SQLBase_delMessage(table=activity_dict[activity].sql_table,
uid=message_uid_list)
activity_dict[activity].deleteMessageList(
self.getSQLConnection(), message_uid_list)
if REQUEST is not None:
return REQUEST.RESPONSE.redirect('%s/%s' % (
self.absolute_url(), 'view'))
......@@ -1523,10 +1533,7 @@ class ActivityTool (BaseTool):
"""
Return the number of messages which match the given tag.
"""
message_count = 0
for activity in activity_dict.itervalues():
message_count += activity.countMessageWithTag(aq_inner(self), value)
return message_count
return self.countMessage(tag=value)
security.declarePublic('countMessage')
def countMessage(self, **kw):
......@@ -1540,10 +1547,11 @@ class ActivityTool (BaseTool):
tag : activities with a particular tag
message_uid : activities with a particular uid
"""
message_count = 0
for activity in activity_dict.itervalues():
message_count += activity.countMessage(aq_inner(self), **kw)
return message_count
db = self.getSQLConnection()
quote = db.string_literal
return sum(x for x, in db.query("(%s)" % ") UNION ALL (".join(
activity.countMessageSQL(quote, **kw)
for activity in activity_dict.itervalues()))[1])
security.declareProtected( CMFCorePermissions.ManagePortal , 'newActiveProcess' )
def newActiveProcess(self, REQUEST=None, **kw):
......@@ -1554,23 +1562,31 @@ class ActivityTool (BaseTool):
REQUEST['RESPONSE'].redirect( 'manage_main' )
return obj
# Active synchronisation methods
security.declarePrivate('validateOrder')
def validateOrder(self, message, validator_id, validation_value):
message_list = self.getDependentMessageList(message, validator_id, validation_value)
return len(message_list) > 0
security.declarePrivate('getDependentMessageList')
def getDependentMessageList(self, message, validator_id, validation_value):
message_list = []
method_id = "_validate_" + validator_id
def getDependentMessageList(self, message, validating_queue=None):
activity_kw = message.activity_kw
db = self.getSQLConnection()
quote = db.string_literal
queries = []
for activity in activity_dict.itervalues():
method = getattr(activity, method_id, None)
if method is not None:
result = method(aq_inner(self), message, validation_value)
if result:
message_list += [(activity, m) for m in result]
return message_list
q = activity.getValidationSQL(
quote, activity_kw, activity is validating_queue)
if q:
queries.append(q)
if queries:
message_list = []
for line in Results(db.query("(%s)" % ") UNION ALL (".join(queries))):
activity = activity_dict[line.activity]
m = Message.load(line.message,
line=line,
uid=line.uid,
date=line.date,
processing_node=line.processing_node)
if not hasattr(m, 'order_validation_text'): # BBB
m.order_validation_text = activity.getOrderValidationText(m)
message_list.append((activity, m))
return message_list
return ()
# Required for tests (time shift)
def timeShift(self, delay):
......
#!/bin/sh
set -e
# Small watching script based on Sébastien's idea.
# ideas:
# - more control on what would be displayed
......@@ -32,13 +31,47 @@ INTERVAL=$2
exit 1
}
SELECT=""
for t in message message_queue ; do
SELECT=$SELECT"""
SELECT count(*) AS $t, ${text_group:-method_id}, processing, processing_node AS node, min(priority) AS min_pri, max(priority) AS max_pri FROM $t GROUP BY ${text_group:-method_id}, processing, processing_node ORDER BY node;
SELECT count(*) AS $t, processing, processing_node, min(priority) AS min_pri, max(priority) AS max_pri FROM $t GROUP BY processing, processing_node;
SELECT priority as pri, MIN(timediff(NOW(), date)) AS min, AVG(timediff(NOW() , date)) AS avg, MAX(timediff(NOW() , date)) AS max FROM $t GROUP BY priority;
SELECT count(*) AS ${t}_count FROM $t;
"""
node_priority_cols="processing_node AS node, MIN(priority) AS min_pri, MAX(priority) AS max_pri"
for t in message:dict message_queue:queue message_job:joblib; do
table=${t%:*}
t=${t#*:}
create=$create"
CREATE TEMPORARY TABLE _$t(
n INT UNSIGNED NOT NULL,
${text_group:-method_id} VARCHAR(255) NOT NULL,
processing_node SMALLINT NOT NULL,
priority TINYINT NOT NULL,
min_date DATETIME(6) NOT NULL,
max_date DATETIME(6) NOT NULL,
max_retry TINYINT UNSIGNED NOT NULL
) ENGINE=MEMORY;"
collect=$collect"
INSERT INTO _$t SELECT count(*), ${text_group:-method_id},
processing_node, priority, MIN(date), MAX(date), MAX(retry) FROM $table
GROUP BY processing_node, priority, ${text_group:-method_id};"
select=$select"
SELECT IFNULL(SUM(n),0) AS $t, ${text_group:-method_id},
$node_priority_cols, MAX(max_retry) AS max_retry FROM _$t
GROUP BY processing_node, ${text_group:-method_id}
ORDER BY processing_node, ${text_group:-method_id};
SELECT priority,
TIME_FORMAT(TIMEDIFF(UTC_TIMESTAMP(6), MAX(max_date)), \"%T\") AS min,
TIME_FORMAT(TIMEDIFF(UTC_TIMESTAMP(6), MIN(min_date)), \"%T\") AS max
FROM _$t GROUP BY priority ORDER BY priority;"
[ "$count" ] && {
not_processing=$not_processing" UNION ALL "
count=$count,
}
not_processing=$not_processing"
SELECT IFNULL(SUM(n),0) AS count, $node_priority_cols,
MIN(min_date) AS min_date, MAX(max_date) AS max_date
FROM _$t WHERE processing_node<=0 GROUP BY processing_node"
count=$count"(SELECT IFNULL(SUM(n),0) AS $t FROM _$t) as $t"
total=$total+$t
done
exec watch -n ${INTERVAL:-5} "${MYSQL:-mysql} $MYSQL_OPT --disable-pager -t -e '$SELECT' "
exec watch -n ${INTERVAL:-5} "${MYSQL:-mysql} $MYSQL_OPT --disable-pager -t -e '
SET autocommit=off;$create$collect
SELECT *, $total as total FROM $count;$select
SELECT SUM(count) as count, node, MIN(min_pri) AS min_pri, MAX(max_pri) AS max_pri,
MIN(min_date) AS min_date, MAX(max_date) AS max_date
FROM ($not_processing) as t GROUP BY node;'"
......@@ -50,7 +50,6 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
<th align="left" valign="top">Named Parameters</th>
<th align="left" valign="top">Processing Node</th>
<th align="left" valign="top">Retry</th>
<th align="left" valign="top">Processing</th>
<th align="left" valign="top">Call Traceback</th>
</tr>
<dtml-in expr="getMessageList()">
......@@ -84,11 +83,6 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
</td>
<td align="left" valign="top"><dtml-var processing_node></td>
<td align="left" valign="top"><dtml-var retry></td>
<td align="left" valign="top">
<dtml-if expr="processing is not None">
<dtml-var processing>
</dtml-if>
</td>
<td align="left" valign="top">
<dtml-if expr="call_traceback is not None">
<pre><dtml-var call_traceback></pre>
......
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
processing_node
uid:list
</params>
UPDATE
<dtml-var table>
SET
processing_node=<dtml-sqlvar processing_node type="int">,
processing=0
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-var sql_delimiter>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table</params>
CREATE TABLE <dtml-var table> (
`uid` BIGINT UNSIGNED NOT NULL,
`date` DATETIME(6) NOT NULL,
`path` VARCHAR(255) NOT NULL,
`active_process_uid` INT UNSIGNED NULL,
`method_id` VARCHAR(255) NOT NULL,
`processing_node` SMALLINT NOT NULL DEFAULT -1,
`processing` TINYINT NOT NULL DEFAULT 0,
`processing_date` DATETIME(6),
`priority` TINYINT NOT NULL DEFAULT 0,
`group_method_id` VARCHAR(255) NOT NULL DEFAULT '',
`tag` VARCHAR(255) NOT NULL,
`serialization_tag` VARCHAR(255) NOT NULL,
`retry` TINYINT UNSIGNED NOT NULL DEFAULT 0,
`message` LONGBLOB NOT NULL,
PRIMARY KEY (`uid`),
KEY (`path`),
KEY (`active_process_uid`),
KEY (`method_id`),
KEY `processing_node_processing` (`processing_node`, `processing`),
KEY `processing_node_priority_date` (`processing_node`, `priority`, `date`),
KEY `node_group_priority_date` (`processing_node`, `group_method_id`, `priority`, `date`),
KEY `serialization_tag_processing_node` (`serialization_tag`, `processing_node`),
KEY (`priority`),
KEY (`tag`)
) ENGINE=InnoDB
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:100
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
uid:list
</params>
DELETE FROM
<dtml-var table>
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:100
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table</params>
DROP TABLE IF EXISTS <dtml-var table>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params></params>
SELECT UTC_TIMESTAMP(6)
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
</params>
SELECT `priority`, `date` FROM
<dtml-var table>
WHERE
processing_node = 0
AND date <= UTC_TIMESTAMP(6)
ORDER BY priority, date
LIMIT 1
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
processing_node
to_date
count
group_method_id
</params>
SELECT
*
FROM
<dtml-var table>
WHERE
processing_node=0
AND date <= <dtml-sqlvar to_date type="datetime(6)">
<dtml-if expr="group_method_id is not None">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
</dtml-if>
ORDER BY
<dtml-comment>
During normal operation, sorting by date (as 2nd criteria) is fairer
for users and reduce the probability to do the same work several times
(think of an object that is modified several times in a short period of time).
</dtml-comment>
priority, date
LIMIT <dtml-sqlvar count type="int">
FOR UPDATE
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
path
method_id
active_process_uid
only_valid
only_invalid</params>
SELECT count(path) as message_count FROM
<dtml-var table>
WHERE 1 = 1
<dtml-if expr="path is not None">AND path = <dtml-sqlvar path type="string"> </dtml-if>
<dtml-if expr="method_id is not None">AND method_id = <dtml-sqlvar method_id type="string"></dtml-if>
<dtml-if expr="only_valid">AND processing_node > -2</dtml-if>
<dtml-if expr="only_invalid">AND processing_node < -1</dtml-if>
<dtml-if expr="active_process_uid is not None"> AND active_process_uid = <dtml-sqlvar active_process_uid type="int"> </dtml-if>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
uid</params>
UPDATE
<dtml-var table>
SET
processing_node=0,
processing=0
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-var sql_delimiter>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
uid</params>
UPDATE
<dtml-var table>
SET
processing_date = UTC_TIMESTAMP(6),
processing = 1
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-var sql_delimiter>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:100
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
uid:list
retry
delay
</params>
UPDATE
<dtml-var table>
SET
date = DATE_ADD(UTC_TIMESTAMP(6), INTERVAL
<dtml-sqlvar delay type="int"> SECOND)
<dtml-if expr="retry is not None">
, priority = priority + <dtml-sqlvar retry type="int">
, retry = retry + <dtml-sqlvar retry type="int">
</dtml-if>
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
processing_node
uid
</params>
UPDATE
<dtml-var table>
SET
processing_node=<dtml-sqlvar processing_node type="int">
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-var sql_delimiter>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
processing_node
group_method_id
count</params>
SELECT
*
FROM
<dtml-var table>
WHERE
processing_node = <dtml-sqlvar processing_node type="int">
<dtml-if expr="group_method_id is not None">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
</dtml-if>
<dtml-if expr="count is not None">
LIMIT <dtml-sqlvar count type="int">
</dtml-if>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
delay
processing_node</params>
UPDATE
<dtml-var table>
SET
date = DATE_SUB(date, INTERVAL <dtml-sqlvar delay type="int"> SECOND),
processing_date = DATE_SUB(processing_date, INTERVAL <dtml-sqlvar delay type="int"> SECOND)
<dtml-if expr="processing_node is not None">
WHERE <dtml-sqltest processing_node type="int">
</dtml-if>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
method_id
message_uid
path
tag
count
serialization_tag
</params>
SELECT
<dtml-if expr="count">
COUNT(*) AS uid_count
<dtml-else>
*
</dtml-if>
FROM
<dtml-var table>
WHERE
processing_node > -10
<dtml-if expr="method_id">
AND method_id IN (
<dtml-in method_id><dtml-sqlvar sequence-item type="string"><dtml-if sequence-end><dtml-else>,</dtml-if></dtml-in>
)
</dtml-if>
<dtml-if expr="message_uid is not None">AND uid = <dtml-sqlvar message_uid type="int"> </dtml-if>
<dtml-if expr="path">
AND path IN (
<dtml-in path><dtml-sqlvar sequence-item type="string"><dtml-if sequence-end><dtml-else>,</dtml-if></dtml-in>
)
</dtml-if>
<dtml-if expr="tag">
AND tag IN (
<dtml-in tag><dtml-sqlvar sequence-item type="string"><dtml-if sequence-end><dtml-else>,</dtml-if></dtml-in>
)
</dtml-if>
<dtml-if expr="serialization_tag is not None">
AND processing_node > -1
AND serialization_tag = <dtml-sqlvar serialization_tag type="string">
</dtml-if>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:100
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>table
uid_list
path_list
active_process_uid_list
method_id_list
message_list
priority_list
processing_node_list
date_list
group_method_id_list
tag_list
serialization_tag_list
</params>
INSERT INTO <dtml-var table>
(uid, path, active_process_uid, date, method_id, processing_node, processing, priority, group_method_id, tag, serialization_tag, message)
VALUES
<dtml-in prefix="loop" expr="_.range(_.len(path_list))">
<dtml-if sequence-start><dtml-else>,</dtml-if>
(
<dtml-sqlvar expr="uid_list[loop_item]" type="int">,
<dtml-sqlvar expr="path_list[loop_item]" type="string">,
<dtml-sqlvar expr="active_process_uid_list[loop_item]" type="int" optional>,
<dtml-if expr="date_list[loop_item] is not None"><dtml-sqlvar expr="date_list[loop_item]" type="datetime(6)"><dtml-else>UTC_TIMESTAMP(6)</dtml-if>,
<dtml-sqlvar expr="method_id_list[loop_item]" type="string">,
<dtml-sqlvar expr="processing_node_list[loop_item]" type="int">,
0,
<dtml-sqlvar expr="priority_list[loop_item]" type="int">,
<dtml-sqlvar expr="group_method_id_list[loop_item]" type="string">,
<dtml-sqlvar expr="tag_list[loop_item]" type="string">,
<dtml-sqlvar expr="serialization_tag_list[loop_item]" type="string">,
<dtml-sqlvar expr="message_list[loop_item]" type="string">
)
</dtml-in>
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params></params>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
processing_node
uid
</params>
UPDATE
message
SET
processing_node=<dtml-sqlvar processing_node type="int">
WHERE
<dtml-sqltest uid type="int" multiple>
<dtml-var sql_delimiter>
COMMIT
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params></params>
ROLLBACK
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
path
method_id
group_method_id
</params>
SELECT uid FROM
message
WHERE
processing_node = 0
AND (path = <dtml-sqlvar path type="string">
OR path LIKE <dtml-sqlvar type="string"
expr="path.replace('_', r'\_') + '/%'">)
AND method_id = <dtml-sqlvar method_id type="string">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
FOR UPDATE
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
path
method_id
group_method_id
</params>
SELECT uid FROM
message
WHERE
processing_node = 0
AND path = <dtml-sqlvar path type="string">
AND method_id = <dtml-sqlvar method_id type="string">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
FOR UPDATE
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
path
method_id
group_method_id
processing_node
</params>
SELECT * FROM
message
WHERE
processing_node IN (0, <dtml-sqlvar processing_node type="int">)
AND <dtml-sqltest path type="string" multiple>
AND method_id = <dtml-sqlvar method_id type="string">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
ORDER BY path
LIMIT 1
FOR UPDATE
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params></params>
CREATE TABLE message_job (
`uid` BIGINT UNSIGNED NOT NULL,
`date` DATETIME(6) NOT NULL,
`path` VARCHAR(255) NOT NULL,
`active_process_uid` INT UNSIGNED NULL,
`method_id` VARCHAR(255) NOT NULL,
`processing_node` SMALLINT NOT NULL DEFAULT -1,
`processing` TINYINT NOT NULL DEFAULT 0,
`processing_date` DATETIME(6),
`priority` TINYINT NOT NULL DEFAULT 0,
`group_method_id` VARCHAR(255) NOT NULL DEFAULT '',
`tag` VARCHAR(255) NOT NULL,
`signature` BINARY(16) NOT NULL,
`serialization_tag` VARCHAR(255) NOT NULL,
`retry` TINYINT UNSIGNED NOT NULL DEFAULT 0,
`message` LONGBLOB NOT NULL,
PRIMARY KEY (`uid`),
KEY (`path`),
KEY (`active_process_uid`),
KEY (`method_id`),
KEY `processing_node_processing` (`processing_node`, `processing`),
KEY `processing_node_priority_date` (`processing_node`, `priority`, `date`),
KEY `node_group_priority_date` (`processing_node`, `group_method_id`, `priority`, `date`),
KEY `serialization_tag_processing_node` (`serialization_tag`, `processing_node`),
KEY (`priority`),
KEY (`tag`)
) ENGINE=InnoDB
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:0
max_cache:0
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
path
method_id
group_method_id
signature
</params>
SELECT uid FROM
message_job
WHERE
processing_node = 0
AND path = <dtml-sqlvar path type="string">
AND method_id = <dtml-sqlvar method_id type="string">
AND group_method_id = <dtml-sqlvar group_method_id type="string">
AND signature = <dtml-sqlvar signature type="string">
FOR UPDATE
<dtml-comment>
title:
connection_id:cmf_activity_sql_connection
max_rows:1000
max_cache:100
cache_time:0
class_name:
class_file:
</dtml-comment>
<params>
uid_list
path_list
active_process_uid_list
method_id_list
message_list
priority_list
processing_node_list
date_list
group_method_id_list
tag_list
signature_list
serialization_tag_list
</params>
INSERT INTO message_job
(uid, path, active_process_uid, date, method_id, processing_node, processing, priority, group_method_id, tag, signature, serialization_tag, message)
VALUES
<dtml-in prefix="loop" expr="_.range(_.len(path_list))">
<dtml-if sequence-start><dtml-else>,</dtml-if>
(
<dtml-sqlvar expr="uid_list[loop_item]" type="int">,
<dtml-sqlvar expr="path_list[loop_item]" type="string">,
<dtml-sqlvar expr="active_process_uid_list[loop_item]" type="int" optional>,
<dtml-if expr="date_list[loop_item] is not None"><dtml-sqlvar expr="date_list[loop_item]" type="datetime(6)"><dtml-else>UTC_TIMESTAMP(6)</dtml-if>,
<dtml-sqlvar expr="method_id_list[loop_item]" type="string">,
<dtml-sqlvar expr="processing_node_list[loop_item]" type="int">,
0,
<dtml-sqlvar expr="priority_list[loop_item]" type="int">,
<dtml-sqlvar expr="group_method_id_list[loop_item]" type="string">,
<dtml-sqlvar expr="tag_list[loop_item]" type="string">,
<dtml-sqlvar expr="signature_list[loop_item]" type="string">,
<dtml-sqlvar expr="serialization_tag_list[loop_item]" type="string">,
<dtml-sqlvar expr="message_list[loop_item]" type="string">
)
</dtml-in>
......@@ -28,23 +28,24 @@
import inspect
import unittest
from functools import wraps
from Products.ERP5Type.tests.utils import LogInterceptor
from Testing import ZopeTestCase
from Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase
from Products.ERP5Type.tests.utils import createZODBPythonScript
from Products.ERP5Type.Base import Base
from Products.CMFActivity import ActivityTool
from Products.CMFActivity.Activity.SQLBase import INVOKE_ERROR_STATE
from Products.CMFActivity.Activity.Queue import VALIDATION_ERROR_DELAY
from Products.CMFActivity.Activity.SQLDict import SQLDict
import Products.CMFActivity.ActivityTool
from Products.CMFActivity.Errors import ActivityPendingError, ActivityFlushError
from erp5.portal_type import Organisation
from AccessControl.SecurityManagement import newSecurityManager
from zLOG import LOG
from ZODB.POSException import ConflictError
from DateTime import DateTime
from Products.CMFActivity.ActivityTool import Message
from Products.CMFActivity.ActivityTool import (
cancelProcessShutdown, Message, getCurrentNode, getServerAddress)
from _mysql_exceptions import OperationalError
from Products.ZMySQLDA.db import DB
from sklearn.externals.joblib.hashing import hash as joblib_hash
......@@ -53,7 +54,6 @@ import random
import threading
import weakref
import transaction
from Products.CMFActivity.ActivityTool import getCurrentNode, getServerAddress
from App.config import getConfiguration
from asyncore import socket_map
import socket
......@@ -61,6 +61,15 @@ import socket
class CommitFailed(Exception):
pass
# Decorator: run the wrapped test once per registered activity queue
# (iterates ActivityTool.activity_dict, whose keys are activity names such
# as 'SQLDict', 'SQLQueue', 'SQLJoblib' -- see their use elsewhere in this
# file), passing the activity name as second argument.
# After the run(s), abort and assert that no activity message is left
# behind, so a test cannot leak uncommitted messages.
def for_each_activity(wrapped):
def wrapper(self):
getMessageList = self.portal.portal_activities.getMessageList
for activity in ActivityTool.activity_dict:
wrapped(self, activity)
self.abort()
self.assertFalse(getMessageList())
# wraps() keeps the original test name/docstring for unittest reporting.
return wraps(wrapped)(wrapper)
def registerFailingTransactionManager(*args, **kw):
from Shared.DC.ZRDB.TM import TM
class dummy_tm(TM):
......@@ -109,6 +118,30 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
o1 = organisation_module.newContent(id=self.company_id)
self.tic()
def tearDown(self):
# Override ERP5 tearDown to make sure that tests do not leave unprocessed
# activity messages. We are testing CMFActivity so it's important to check
# that everything works as expected on this subject.
try:
# Only check when the test succeeded: a failing test already reports its
# own error and may legitimately leave messages behind.
if self._resultForDoCleanups.wasSuccessful():
getMessageList = self.portal.portal_activities.getMessageList
self.assertFalse(getMessageList())
# Also check if a test drop them without committing.
self.abort()
self.assertFalse(getMessageList())
finally:
# Always run the base class cleanup, even if the checks above fail.
ERP5TypeTestCase.tearDown(self)
# Return the messages of a single activity queue, by querying the queue
# object directly (ActivityTool.activity_dict[activity]) instead of the
# aggregated portal_activities.getMessageList. `activity` is a queue name
# (e.g. 'SQLDict'); **kw is forwarded as filter criteria to the queue.
def getMessageList(self, activity, **kw):
return ActivityTool.activity_dict[activity].getMessageList(
self.portal.portal_activities, **kw)
# Delete the given messages (by uid) directly through the queue object of
# `activity`, then commit -- used by tests to clean up deliberately-failed
# messages so tearDown does not report them as leftovers.
def deleteMessageList(self, activity, message_list):
ActivityTool.activity_dict[activity].deleteMessageList(
self.portal.portal_activities.getSQLConnection(),
[m.uid for m in message_list])
self.commit()
def login(self):
uf = self.portal.acl_users
uf._doAddUser('seb', '', ['Manager'], [])
......@@ -116,7 +149,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
user = uf.getUserById('seb').__of__(uf)
newSecurityManager(None, user)
def InvokeAndCancelActivity(self, activity):
@for_each_activity
def testInvokeAndCancelActivity(self, activity):
"""
Simple test where we invoke and cancel an activity
"""
......@@ -144,10 +178,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Needed so that the message are removed from the queue
self.commit()
self.assertEqual(self.title2,organisation.getTitle())
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
def DeferredSetTitleActivity(self, activity):
@for_each_activity
def testDeferredSetTitleActivity(self, activity):
"""
We check that the title is changed only after that
the activity was called
......@@ -162,10 +195,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(self.title1,organisation.getTitle())
activity_tool.tic()
self.assertEqual(self.title2,organisation.getTitle())
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
def CallOnceWithActivity(self, activity):
@for_each_activity
def testCallOnceWithActivity(self, activity):
"""
With this test we can check if methods are called
only once (sometimes it was twice !!!)
......@@ -201,11 +233,10 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.manageInvoke(organisation.getPhysicalPath(),'setFoobar')
# Needed so that the message are commited into the queue
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
self.assertEqual(2,organisation.getFoobar())
def TryFlushActivity(self, activity):
@for_each_activity
def testTryFlushActivity(self, activity):
"""
Check the method flush
"""
......@@ -227,7 +258,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(organisation.getTitle(),self.title2)
self.commit()
def TryActivateInsideFlush(self, activity):
@for_each_activity
def testTryActivateInsideFlush(self, activity):
"""
Create a new activity inside a flush action
"""
......@@ -242,11 +274,10 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.commit()
activity_tool.tic()
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
self.assertEqual(organisation.getTitle(),self.title2)
def TryTwoMethods(self, activity):
@for_each_activity
def testTryTwoMethods(self, activity):
"""
Try several activities
"""
......@@ -266,12 +297,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.distribute()
activity_tool.tic()
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
self.assertEqual(organisation.getTitle(),self.title1)
self.assertEqual(organisation.getDescription(),self.title1)
def TryTwoMethodsAndFlushThem(self, activity):
@for_each_activity
def testTryTwoMethodsAndFlushThem(self, activity):
"""
make sure flush works with several activities
"""
......@@ -292,8 +322,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.distribute()
activity_tool.tic()
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
self.assertEqual(organisation.getTitle(),self.title1)
self.assertEqual(organisation.getDescription(),self.title1)
......@@ -322,12 +350,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.distribute()
activity_tool.tic()
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
self.assertEqual(organisation.getTitle(),self.title1)
self.assertEqual(organisation.getDescription(),self.title1)
def TryMessageWithErrorOnActivity(self, activity):
@for_each_activity
def testTryMessageWithErrorOnActivity(self, activity):
"""
Make sure that message with errors are not deleted
"""
......@@ -350,10 +377,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.manageCancel(organisation.getPhysicalPath(),'crashThisActivity')
# Needed so that the message are commited into the queue
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
def DeferredSetTitleWithRenamedObject(self, activity):
@for_each_activity
def testDeferredSetTitleWithRenamedObject(self, activity):
"""
make sure that it is impossible to rename an object
if some activities are still waiting for this object
......@@ -386,8 +412,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
result = active_process.getResultList()[0]
self.assertEqual(result.method_id , 'getTitle')
self.assertEqual(result.result , self.title1)
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
def TryActiveProcessWithResultDict(self, activity):
"""
......@@ -417,11 +441,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
result = result_dict[3]
self.assertEqual(result_dict[3].method_id, 'getTitle')
self.assertEqual(result.result , self.title1)
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),0)
def TryMethodAfterMethod(self, activity):
@for_each_activity
def testTryMethodAfterMethod(self, activity):
"""
Ensure the order of an execution by a method id
"""
......@@ -444,7 +466,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.tic()
self.assertEqual(o.getTitle(), 'acb')
def TryAfterTag(self, activity):
@for_each_activity
def testTryAfterTag(self, activity):
"""
Ensure the order of an execution by a tag
"""
......@@ -468,7 +491,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.tic()
self.assertEqual(o.getCorporateName(), 'cd')
def TryFlushActivityWithAfterTag(self, activity):
@for_each_activity
def testTryFlushActivityWithAfterTag(self, activity):
"""
Ensure the order of an execution by a tag
"""
......@@ -490,11 +514,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(o.getTitle(), 'a')
self.assertEqual(o.getDescription(), '?')
self.tic()
self.assertEqual(len(tool.getMessageList()),0)
self.assertEqual(o.getTitle(), 'a')
self.assertEqual(o.getDescription(), 'b')
def CheckScheduling(self, activity):
@for_each_activity
def testScheduling(self, activity):
"""
Check if active objects with different after parameters are executed in a correct order
"""
......@@ -516,7 +540,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.tic()
self.assertEqual(o.getTitle(), 'cb')
def CheckSchedulingAfterTagList(self, activity):
@for_each_activity
def testSchedulingAfterTagList(self, activity):
"""
Check if active objects with different after parameters are executed in a
correct order, when after_tag is passed as a list
......@@ -538,7 +563,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.tic()
self.assertEqual(o.getTitle(), 'last')
def CheckCountMessageWithTag(self, activity):
@for_each_activity
def testCheckCountMessageWithTag(self, activity):
"""
Check countMessageWithTag function.
"""
......@@ -555,39 +581,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(o.getTitle(), 'a')
self.assertEqual(activity_tool.countMessageWithTag('toto'), 0)
def TryConflictErrorsWhileValidating(self, activity):
"""Try to execute active objects which may throw conflict errors
while validating, and check if they are still executed."""
o = self.getOrganisation()
# Monkey patch Queue to induce conflict errors artificially.
# Fails the first `conflict_errors_limit` validations with ConflictError,
# then delegates to the original implementation.
def validate(self, *args, **kwargs):
from Products.CMFActivity.Activity.Queue import Queue
if Queue.current_num_conflict_errors < Queue.conflict_errors_limit:
Queue.current_num_conflict_errors += 1
# LOG('TryConflictErrorsWhileValidating', 0, 'causing a conflict error artificially')
raise ConflictError
return self.original_validate(*args, **kwargs)
from Products.CMFActivity.Activity.Queue import Queue
Queue.original_validate = Queue.validate
Queue.validate = validate
try:
# Test some range of conflict error occurences.
for i in xrange(10):
Queue.current_num_conflict_errors = 0
Queue.conflict_errors_limit = i
o.activate(activity = activity).getId()
self.commit()
self.flushAllActivities(silent = 1, loop_size = i + 10)
self.assertFalse(self.portal.portal_activities.getMessageList())
finally:
# Always restore the original Queue.validate and drop the patch state,
# so the monkey patch cannot leak into other tests.
Queue.validate = Queue.original_validate
del Queue.original_validate
del Queue.current_num_conflict_errors
del Queue.conflict_errors_limit
def TryErrorsWhileFinishingCommitDB(self, activity):
def testTryErrorsWhileFinishingCommitDB(self):
"""Try to execute active objects which may throw conflict errors
while validating, and check if they are still executed."""
activity_tool = self.portal.portal_activities
......@@ -602,7 +596,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Test some range of conflict error occurences.
self.portal.organisation_module.reindexObject()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 1)
message, = activity_tool.getMessageList()
try:
DB.original_query = DB.query
DB.query = query
......@@ -612,148 +606,43 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
DB.query = DB.original_query
del DB.original_query
self.assertEqual(len(activity_tool.getMessageList()), 1)
self.deleteMessageList('SQLDict', [message])
def checkIsMessageRegisteredMethod(self, activity):
@for_each_activity
def testIsMessageRegisteredMethod(self, activity):
dedup = activity != 'SQLQueue'
activity_tool = self.portal.portal_activities
object_b = self.getOrganisation()
object_a = object_b.getParentValue()
# First case: creating the same activity twice must only register one.
self.assertEqual(len(activity_tool.getMessageList()), 0) # Sanity check
def check(count):
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), count)
self.tic()
# First case: creating the same activity twice must only register one
# for queues with deduplication.
object_a.activate(activity=activity).getId()
object_a.activate(activity=activity).getId()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 1)
activity_tool.manageClearActivities()
self.commit()
# Second case: creating activity with same tag must only register one.
check(1 if dedup else 2)
# Second case: creating activity with same tag must only register one,
# for queues with deduplication.
# This behaviour is actually the same as the no-tag behaviour.
self.assertEqual(len(activity_tool.getMessageList()), 0) # Sanity check
object_a.activate(activity=activity, tag='foo').getId()
object_a.activate(activity=activity, tag='foo').getId()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 1)
activity_tool.manageClearActivities()
self.commit()
check(1 if dedup else 2)
# Third case: creating activities with different tags must register both.
self.assertEqual(len(activity_tool.getMessageList()), 0) # Sanity check
object_a.activate(activity=activity, tag='foo').getId()
object_a.activate(activity=activity, tag='bar').getId()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.manageClearActivities()
self.commit()
check(2)
# Fourth case: creating activities on different objects must register
# both.
self.assertEqual(len(activity_tool.getMessageList()), 0) # Sanity check
object_a.activate(activity=activity).getId()
object_b.activate(activity=activity).getId()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.manageClearActivities()
self.commit()
check(2)
# Fifth case: creating activities with different method must register
# both.
self.assertEqual(len(activity_tool.getMessageList()), 0) # Sanity check
object_a.activate(activity=activity).getId()
object_a.activate(activity=activity).getTitle()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.manageClearActivities()
self.commit()
def test_01_DeferredSetTitleSQLDict(self):
# Test if we can add a complete sales order
self.DeferredSetTitleActivity('SQLDict')
def test_02_DeferredSetTitleSQLQueue(self):
# Test if we can add a complete sales order
self.DeferredSetTitleActivity('SQLQueue')
def test_03_DeferredSetTitleSQLJoblib(self):
# Test if we can add a complete sales order
self.DeferredSetTitleActivity('SQLJoblib')
def test_05_InvokeAndCancelSQLDict(self):
# Test if we can add a complete sales order
self.InvokeAndCancelActivity('SQLDict')
def test_06_InvokeAndCancelSQLQueue(self):
# Test if we can add a complete sales order
self.InvokeAndCancelActivity('SQLQueue')
def test_07_InvokeAndCancelSQLJoblib(self):
self.InvokeAndCancelActivity('SQLJoblib')
def test_09_CallOnceWithSQLDict(self):
# Test if we call methods only once
self.CallOnceWithActivity('SQLDict')
def test_10_CallOnceWithSQLQueue(self):
# Test if we call methods only once
self.CallOnceWithActivity('SQLQueue')
def test_11_CallOnceWithSQLJoblib(self):
self.CallOnceWithActivity('SQLJoblib')
def test_13_TryMessageWithErrorOnSQLDict(self):
# Test if we call methods only once
self.TryMessageWithErrorOnActivity('SQLDict')
def test_14_TryMessageWithErrorOnSQLQueue(self):
# Test if we call methods only once
self.TryMessageWithErrorOnActivity('SQLQueue')
def test_15_TryMessageWithErrorOnSQLJoblib(self):
self.TryMessageWithErrorOnActivity('SQLJoblib')
def test_17_TryFlushActivityWithSQLDict(self):
# Test if we call methods only once
self.TryFlushActivity('SQLDict')
def test_18_TryFlushActivityWithSQLQueue(self):
# Test if we call methods only once
self.TryFlushActivity('SQLQueue')
def test_19_TryFlushActivityWithSQLJoblib(self):
# Test if we call methods only once
self.TryFlushActivity('SQLJoblib')
def test_21_TryActivateInsideFlushWithSQLDict(self):
# Test if we call methods only once
self.TryActivateInsideFlush('SQLDict')
def test_22_TryActivateInsideFlushWithSQLQueue(self):
# Test if we call methods only once
self.TryActivateInsideFlush('SQLQueue')
def test_23_TryActivateInsideFlushWithSQLQueue(self):
# Test if we call methods only once
self.TryActivateInsideFlush('SQLJoblib')
def test_25_TryTwoMethodsWithSQLDict(self):
# Test if we call methods only once
self.TryTwoMethods('SQLDict')
def test_26_TryTwoMethodsWithSQLQueue(self):
# Test if we call methods only once
self.TryTwoMethods('SQLQueue')
def test_27_TryTwoMethodsWithSQLJoblib(self):
# Test if we call methods only once
self.TryTwoMethods('SQLJoblib')
def test_29_TryTwoMethodsAndFlushThemWithSQLDict(self):
# Test if we call methods only once
self.TryTwoMethodsAndFlushThem('SQLDict')
def test_30_TryTwoMethodsAndFlushThemWithSQLQueue(self):
# Test if we call methods only once
self.TryTwoMethodsAndFlushThem('SQLQueue')
def test_31_TryTwoMethodsAndFlushThemWithSQLJoblib(self):
# Test if we call methods only once
self.TryTwoMethodsAndFlushThem('SQLJoblib')
check(2)
def test_33_TryActivateFlushActivateTicWithSQLDict(self):
# Test if we call methods only once
......@@ -776,18 +665,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Test if we call methods only once
self.TryActivateFlushActivateTic('SQLQueue',commit_sub=1)
def test_42_TryRenameObjectWithSQLDict(self):
# Test that renaming is blocked while activities are pending (SQLDict)
self.DeferredSetTitleWithRenamedObject('SQLDict')
def test_43_TryRenameObjectWithSQLQueue(self):
# Test that renaming is blocked while activities are pending (SQLQueue)
self.DeferredSetTitleWithRenamedObject('SQLQueue')
def test_44_TryRenameObjectWithSQLJoblib(self):
# Test that renaming is blocked while activities are pending (SQLJoblib)
self.DeferredSetTitleWithRenamedObject('SQLJoblib')
def test_46_TryActiveProcessWithSQLDict(self):
# Test if we call methods only once
self.TryActiveProcess('SQLDict')
......@@ -800,18 +677,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Test if we call methods only once
self.TryActiveProcessWithResultDict('SQLJoblib')
def test_54_TryAfterMethodIdWithSQLDict(self):
# Test if after_method_id can be used
self.TryMethodAfterMethod('SQLDict')
def test_55_TryAfterMethodIdWithSQLQueue(self):
# Test if after_method_id can be used
self.TryMethodAfterMethod('SQLQueue')
def test_56_TryAfterMethodIdWithSQLJoblib(self):
# Test if after_method_id can be used
self.TryMethodAfterMethod('SQLJoblib')
def test_57_TryCallActivityWithRightUser(self):
# Test if me execute methods with the right user
# This should be independant of the activity used
......@@ -828,49 +693,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Then execute activities as seb
user = uf.getUserById('seb').__of__(uf)
newSecurityManager(None, user)
self.commit()
activity_tool.distribute()
activity_tool.tic()
self.tic()
email = organisation.get('email')
# Check if what we did was executed as toto
self.assertEqual(email.getOwnerInfo()['id'],'toto')
def test_59_TryAfterTagWithSQLDict(self):
# Test if after_tag can be used
self.TryAfterTag('SQLDict')
def test_60_TryAfterTagWithSQLQueue(self):
# Test if after_tag can be used
self.TryAfterTag('SQLQueue')
def test_61_TryAfterTagWithSQLJoblib(self):
# Test if after_tag can be used
self.TryAfterTag('SQLJoblib')
def test_62_CheckSchedulingWithSQLDict(self):
# Test if scheduling is correct with SQLDict
self.CheckScheduling('SQLDict')
def test_63_CheckSchedulingWithSQLQueue(self):
# Test if scheduling is correct with SQLQueue
self.CheckScheduling('SQLQueue')
def test_64_CheckSchedulingWithSQLJoblib(self):
# Test if scheduling is correct with SQLJoblib
self.CheckScheduling('SQLJoblib')
def test_65_CheckSchedulingAfterTagListWithSQLDict(self):
# Test if scheduling is correct with SQLDict
self.CheckSchedulingAfterTagList('SQLDict')
def test_66_CheckSchedulingWithAfterTagListSQLQueue(self):
# Test if scheduling is correct with SQLQueue
self.CheckSchedulingAfterTagList('SQLQueue')
def test_67_CheckSchedulingWithAfterTagListSQLJoblib(self):
# Test if scheduling is correct with SQLJoblib
self.CheckSchedulingAfterTagList('SQLJoblib')
def flushAllActivities(self, silent=0, loop_size=1000):
"""Executes all messages until the queue only contains failed
messages.
......@@ -880,10 +707,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.distribute(node_count=1)
activity_tool.tic(processing_node=1)
finished = 1
for message in activity_tool.getMessageList():
if message.processing_node != INVOKE_ERROR_STATE:
finished = 0
finished = all(message.processing_node == INVOKE_ERROR_STATE
for message in activity_tool.getMessageList())
activity_tool.timeShift(3 * VALIDATION_ERROR_DELAY)
self.commit()
......@@ -910,18 +735,17 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
title=original_title)
# Monkey patch Organisation to add a failing method
def failingMethod(self):
raise ValueError, 'This method always fail'
raise ValueError('This method always fail')
Organisation.failingMethod = failingMethod
activity_list = ['SQLQueue', 'SQLDict', 'SQLJoblib']
for activity in activity_list:
for activity in ActivityTool.activity_dict:
# reset
activity_tool.manageClearActivities()
obj.setTitle(original_title)
self.commit()
# activate failing message and flush
for fail_activity in activity_list:
for fail_activity in ActivityTool.activity_dict:
obj.activate(activity = fail_activity).failingMethod()
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
......@@ -942,19 +766,16 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
full_message_list = activity_tool.getMessageList()
remaining_messages = [a for a in full_message_list if a.method_id !=
'failingMethod']
if len(full_message_list) != 4:
self.fail('failingMethod should not have been flushed')
if len(remaining_messages) != 1:
self.fail('Activity tool should have one blocked setTitle activity')
self.assertEqual(len(full_message_list), 4,
'failingMethod should not have been flushed')
self.assertEqual(len(remaining_messages), 1,
'Activity tool should have one blocked setTitle activity')
self.assertEqual(remaining_messages[0].activity_kw['after_method_id'],
['failingMethod'])
self.assertEqual(obj.getTitle(), original_title)
def test_69_TestCountMessageWithTagWithSQLDict(self):
"""
Test new countMessageWithTag function with SQLDict.
"""
self.CheckCountMessageWithTag('SQLDict')
activity_tool.manageClearActivities()
self.commit()
def test_70_TestCancelFailedActiveObject(self):
"""Cancel an active object to make sure that it does not refer to
......@@ -969,7 +790,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Monkey patch Organisation to add a failing method
def failingMethod(self):
raise ValueError, 'This method always fail'
raise ValueError('This method always fail')
Organisation.failingMethod = failingMethod
# First, index the object.
......@@ -997,11 +818,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
message = activity_tool.getMessageList()[0]
activity_tool.manageCancel(message.object_path, message.method_id)
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_71_RetryMessageExecution(self):
activity_tool = self.portal.portal_activities
self.assertFalse(activity_tool.getMessageList())
exec_count = [0]
# priority does not matter anymore
priority = random.Random().randint
......@@ -1015,7 +834,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
raise ConflictError if conflict else Exception
def check(retry_list, **activate_kw):
fail = retry_list[-1][0] is not None and 1 or 0
for activity in 'SQLDict', 'SQLQueue', 'SQLJoblib':
for activity in ActivityTool.activity_dict:
exec_count[0] = 0
activity_tool.activate(activity=activity, priority=priority(1,6),
**activate_kw).doSomething(retry_list)
......@@ -1055,54 +874,14 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
del activity_tool.__class__.doSomething
self.assertFalse(activity_tool.getMessageList())
def test_72_TestConflictErrorsWhileValidatingWithSQLDict(self):
"""
Test if conflict errors spoil out active objects with SQLDict.
"""
self.TryConflictErrorsWhileValidating('SQLDict')
def test_73_TestConflictErrorsWhileValidatingWithSQLQueue(self):
"""
Test if conflict errors spoil out active objects with SQLQueue.
"""
self.TryConflictErrorsWhileValidating('SQLQueue')
def test_74_TestConflictErrorsWhileValidatingWithSQLJoblib(self):
"""
Test if conflict errors spoil out active objects with SQLJoblib.
"""
self.TryConflictErrorsWhileValidating('SQLJoblib')
def test_75_TestErrorsWhileFinishingCommitDBWithSQLDict(self):
"""
Check behaviour when the database raises errors while finishing the
commit, with SQLDict.
"""
self.TryErrorsWhileFinishingCommitDB('SQLDict')
def test_76_TestErrorsWhileFinishingCommitDBWithSQLQueue(self):
"""
Check behaviour when the database raises errors while finishing the
commit, with SQLQueue.
"""
self.TryErrorsWhileFinishingCommitDB('SQLQueue')
def test_77_TryFlushActivityWithAfterTagSQLDict(self):
# Test if after_tag can be used
self.TryFlushActivityWithAfterTag('SQLDict')
def test_78_TryFlushActivityWithAfterTagWithSQLQueue(self):
# Test if after_tag can be used
self.TryFlushActivityWithAfterTag('SQLQueue')
def test_79_ActivateKwForNewContent(self):
o1 = self.getOrganisationModule().newContent(
activate_kw=dict(tag='The Tag'))
self.commit()
messages_for_o1 = [m for m in self.getActivityTool().getMessageList()
if m.object_path == o1.getPhysicalPath()]
self.assertNotEquals(0, len(messages_for_o1))
for m in messages_for_o1:
self.assertEqual(m.activity_kw.get('tag'), 'The Tag')
m, = self.getActivityTool().getMessageList(path=o1.getPath())
self.assertEqual(m.activity_kw.get('tag'), 'The Tag')
self.tic()
def test_80_FlushAfterMultipleActivate(self):
orga_module = self.getOrganisationModule()
......@@ -1116,7 +895,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.setDescription(d+'a')
Organisation.updateDesc = updateDesc
self.assertEqual(len(activity_tool.getMessageList()), 0)
# First check dequeue read same message only once
for i in xrange(10):
p.activate(activity="SQLDict").updateDesc()
......@@ -1134,13 +912,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(len(activity_tool.getMessageList()), 10)
activity_tool.flush(p, invoke=0)
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_81_IsMessageRegisteredSQLDict(self):
"""
Check the behaviour of the isMessageRegistered method with SQLDict.
"""
self.checkIsMessageRegisteredMethod('SQLDict')
def test_82_AbortTransactionSynchronously(self):
"""
......@@ -1152,7 +923,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
module = self.getOrganisationModule()
organisation = module.newContent(portal_type = 'Organisation')
organisation_id = organisation.getId()
self.commit()
self.tic()
organisation = module[organisation_id]
# Now fake a read conflict.
......@@ -1174,8 +945,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.abort()
organisation.uid
def callWithGroupIdParamater(self, activity):
@for_each_activity
def testCallWithGroupIdParamater(self, activity):
dedup = activity != 'SQLQueue'
activity_tool = self.portal.portal_activities
organisation = self.getOrganisation()
# Defined a group method
......@@ -1202,7 +974,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),5)
activity_tool.tic()
expected = dict(SQLDict=1, SQLQueue=5, SQLJoblib=1)[activity]
expected = 1 if dedup else 5
self.assertEqual(expected, organisation.getFoobar())
......@@ -1233,30 +1005,10 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list),20)
activity_tool.tic()
self.assertEqual(dict(SQLDict=11, SQLQueue=60, SQLJoblib=11)[activity],
self.assertEqual(11 if dedup else 60,
organisation.getFoobar())
self.assertEqual(dict(SQLDict=[1, 1, 1], SQLQueue=[5, 5, 10], SQLJoblib=[1,1,1])[activity],
self.assertEqual([1, 1, 1] if dedup else [5, 5, 10],
sorted(foobar_list))
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list), 0)
def test_83a_CallWithGroupIdParamaterSQLDict(self):
"""
Test that group_id parameter is used to separate execution of the same method
"""
self.callWithGroupIdParamater('SQLDict')
def test_83b_CallWithGroupIdParamaterSQLQueue(self):
"""
Test that group_id parameter is used to separate execution of the same method
"""
self.callWithGroupIdParamater('SQLQueue')
def test_83c_CallWithGroupIdParamaterSQLJoblib(self):
"""
Test that group_id parameter is used to separate execution of the same method
"""
self.callWithGroupIdParamater('SQLJoblib')
def test_84_ActivateKwForWorkflowTransition(self):
"""
......@@ -1266,20 +1018,15 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.tic()
o1.validate(activate_kw=dict(tag='The Tag'))
self.commit()
messages_for_o1 = [m for m in self.getActivityTool().getMessageList()
if m.object_path == o1.getPhysicalPath()]
self.assertNotEquals(0, len(messages_for_o1))
for m in messages_for_o1:
self.assertEqual(m.activity_kw.get('tag'), 'The Tag')
m, = self.getActivityTool().getMessageList(path=o1.getPath())
self.assertEqual(m.activity_kw.get('tag'), 'The Tag')
self.tic()
def test_85_LossOfVolatileAttribute(self):
"""
Test that the loss of volatile attribute doesn't loose activities
"""
self.tic()
activity_tool = self.getActivityTool()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list), 0)
def delete_volatiles():
for property_id in activity_tool.__dict__.keys():
if property_id.startswith('_v_'):
......@@ -1296,6 +1043,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.commit()
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list), 2)
self.tic()
def test_88_ProcessingMultipleMessagesMustRevertIndividualMessagesOnError(self):
"""
......@@ -1306,14 +1054,13 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
Queues supporting message batch processing:
- SQLQueue
"""
self.tic()
activity_tool = self.getActivityTool()
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
active_obj = obj.activate(activity='SQLQueue')
def appendToTitle(self, to_append, fail=False):
self.setTitle(self.getTitle() + to_append)
if fail:
raise ValueError, 'This method always fail'
raise ValueError('This method always fail')
try:
Organisation.appendToTitle = appendToTitle
obj.setTitle('a')
......@@ -1325,8 +1072,9 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(obj.getTitle(), 'a')
self.assertEqual(activity_tool.countMessage(method_id='appendToTitle'), 3)
self.flushAllActivities(silent=1, loop_size=100)
self.assertEqual(activity_tool.countMessage(method_id='appendToTitle'), 1)
self.assertEqual(sorted(obj.getTitle()), ['a', 'b', 'd'])
message, = self.getMessageList('SQLQueue', method_id='appendToTitle')
self.deleteMessageList('SQLQueue', [message])
finally:
del Organisation.appendToTitle
......@@ -1337,7 +1085,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
This only apply to queues supporting batch processing:
- SQLQueue
"""
self.tic()
obj = self.portal.organisation_module.newContent(portal_type='Organisation', title='Pending')
marker_id = 'marker_%i' % (random.randint(1, 10), )
def putMarkerValue(self, marker_id):
......@@ -1359,7 +1106,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
del Organisation.putMarkerValue
del Organisation.checkMarkerValue
def TryUserNotificationOnActivityFailure(self, activity):
@for_each_activity
def testTryUserNotificationOnActivityFailure(self, activity):
message_list = self.portal.MailHost._message_list
del message_list[:]
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
......@@ -1388,73 +1136,37 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
del Organisation.failingMethod
def test_90_userNotificationOnActivityFailureWithSQLDict(self):
"""
Check that a user notification method is called on message when activity
fails and will not be tried again.
"""
self.TryUserNotificationOnActivityFailure('SQLDict')
def test_91_userNotificationOnActivityFailureWithSQLJoblib(self):
"""
Check user notification sent on activity final error
"""
self.TryUserNotificationOnActivityFailure('SQLJoblib')
def test_92_userNotificationOnActivityFailureWithSQLQueue(self):
"""
Check that a user notification method is called on message when activity
fails and will not be tried again.
"""
self.TryUserNotificationOnActivityFailure('SQLQueue')
def TryUserNotificationRaise(self, activity):
self.tic()
def test_93_tryUserNotificationRaise(self):
activity_tool = self.portal.portal_activities
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
self.tic()
from Products.CMFActivity.ActivityTool import Message
original_notifyUser = Message.notifyUser
def failingMethod(self, *args, **kw):
raise ValueError, 'This method always fail'
raise ValueError('This method always fail')
Message.notifyUser = failingMethod
Organisation.failingMethod = failingMethod
getMessageList = self.getPortalObject().portal_activities.getMessageList
try:
obj.activate(activity=activity, priority=6).failingMethod()
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
message, = getMessageList(activity=activity, method_id='failingMethod')
self.assertEqual(message.processing, 0)
for activity in ActivityTool.activity_dict:
obj.activate(activity=activity, priority=6).failingMethod()
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
message, = activity_tool.getMessageList(
activity=activity, method_id='failingMethod')
self.assertEqual(message.processing_node, -2)
self.assertTrue(message.retry)
activity_tool.manageDelete(message.uid, activity)
self.commit()
finally:
Message.notifyUser = original_notifyUser
del Organisation.failingMethod
def test_93_userNotificationRaiseWithSQLDict(self):
"""
Check that activities are not left with processing=1 when notifyUser raises.
"""
self.TryUserNotificationRaise('SQLDict')
def test_94_userNotificationRaiseWithSQLQueue(self):
"""
Check that activities are not left with processing=1 when notifyUser raises.
"""
self.TryUserNotificationRaise('SQLQueue')
def test_95_userNotificationRaiseWithSQLJoblib(self):
"""
Check that activities are not left with processing=1 when notifyUser raises.
"""
self.TryUserNotificationRaise('SQLJoblib')
def TryActivityRaiseInCommitDoesNotStallActivityConection(self, activity):
@for_each_activity
def testTryActivityRaiseInCommitDoesNotStallActivityConection(self, activity):
"""
Check that an activity which commit raises (as would a regular conflict
error be raised in tpc_vote) does not cause activity connection to
stall.
"""
self.tic()
activity_tool = self.getActivityTool()
try:
Organisation.registerFailingTransactionManager = registerFailingTransactionManager
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
......@@ -1465,26 +1177,21 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.flushAllActivities(silent=1, loop_size=100)
self.commit()
# Check that cmf_activity SQL connection still works
connection_da = self.getPortalObject().cmf_activity_sql_connection()
connection_da = self.portal.cmf_activity_sql_connection()
self.assertFalse(connection_da._registered)
connection_da.query('select 1')
self.assertTrue(connection_da._registered)
self.commit()
self.assertFalse(connection_da._registered)
message, = self.getMessageList(activity)
self.deleteMessageList(activity, [message])
finally:
del Organisation.registerFailingTransactionManager
def test_96_ActivityRaiseInCommitDoesNotStallActivityConectionSQLDict(self):
self.TryActivityRaiseInCommitDoesNotStallActivityConection('SQLDict')
def test_97_ActivityRaiseInCommitDoesNotStallActivityConectionSQLQueue(self):
self.TryActivityRaiseInCommitDoesNotStallActivityConection('SQLQueue')
def TryActivityRaiseInCommitDoesNotLooseMessages(self, activity):
@for_each_activity
def testTryActivityRaiseInCommitDoesNotLoseMessages(self, activity):
"""
"""
self.tic()
activity_tool = self.getActivityTool()
try:
Organisation.registerFailingTransactionManager = registerFailingTransactionManager
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
......@@ -1494,18 +1201,14 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
self.commit()
self.assertEqual(activity_tool.countMessage(method_id='registerFailingTransactionManager'), 1)
message, = self.getMessageList(activity,
method_id='registerFailingTransactionManager')
self.deleteMessageList(activity, [message])
finally:
del Organisation.registerFailingTransactionManager
def test_98_ActivityRaiseInCommitDoesNotLooseMessagesSQLDict(self):
self.TryActivityRaiseInCommitDoesNotLooseMessages('SQLDict')
def test_99_ActivityRaiseInCommitDoesNotLooseMessagesSQLQueue(self):
self.TryActivityRaiseInCommitDoesNotLooseMessages('SQLQueue')
def TryChangeSkinInActivity(self, activity):
self.tic()
@for_each_activity
def testTryChangeSkinInActivity(self, activity):
activity_tool = self.getActivityTool()
def changeSkinToNone(self):
self.getPortalObject().changeSkin(None)
......@@ -1517,24 +1220,18 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 1)
self.flushAllActivities(silent=1, loop_size=100)
self.assertEqual(len(activity_tool.getMessageList()), 0)
finally:
del Organisation.changeSkinToNone
def test_100_TryChangeSkinInActivitySQLDict(self):
self.TryChangeSkinInActivity('SQLDict')
def test_101_TryChangeSkinInActivitySQLQueue(self):
self.TryChangeSkinInActivity('SQLQueue')
def test_102_TryChangeSkinInActivitySQLJoblib(self):
self.TryChangeSkinInActivity('SQLJoblib')
def test_103_1_CheckSQLDictDoesNotDeleteSimilaritiesBeforeExecution(self):
@for_each_activity
def testDeduplicatingQueuesDoNotDeleteSimilaritiesBeforeExecution(self,
activity):
"""
Test that SQLDict does not delete similar messages which have the same
method_id and path but a different tag before execution.
"""
if activity == 'SQLQueue':
return
activity_tool = self.getActivityTool()
marker = []
def doSomething(self, other_tag):
......@@ -1542,22 +1239,23 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.__class__.doSomething = doSomething
try:
# Adds two similar but not the same activities.
activity_tool.activate(activity='SQLDict', after_tag='foo',
activity_tool.activate(activity=activity, after_tag='foo',
tag='a').doSomething(other_tag='b')
activity_tool.activate(activity='SQLDict', after_tag='bar',
activity_tool.activate(activity=activity, after_tag='bar',
tag='b').doSomething(other_tag='a')
self.commit()
activity_tool.tic() # make sure distribution phase was not skipped
activity_tool.distribute()
# after distribute, similarities are still there.
self.assertEqual(len(activity_tool.getMessageList()), 2)
self.assertEqual(len(self.getMessageList(activity)), 2)
activity_tool.tic()
self.assertEqual(len(activity_tool.getMessageList()), 0)
self.assertEqual(marker, [1])
finally:
del activity_tool.__class__.doSomething
def test_103_2_CheckSQLDictDoesNotDeleteDuplicatesBeforeExecution(self):
@for_each_activity
def testDeduplicatingQueuesDoNotDeleteDuplicatesBeforeExecution(self,
activity):
"""
Test that SQLDict does not delete messages before execution
even if messages have the same method_id and path and tag.
......@@ -1568,49 +1266,29 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
Deduplication is cheap:
- inside the transaction which spawned duplicate activities, because it
has to have created activities around anyway, and can keep track
- inside the CMFActvitiy-level processing surrounding activity execution
- inside the CMFActivity-level processing surrounding activity execution
because it has to load the activities to process them anyway
"""
if activity == 'SQLQueue':
return
activity_tool = self.getActivityTool()
# Adds two same activities.
activity_tool.activate(activity='SQLDict', after_tag='foo', priority=2,
activity_tool.activate(activity=activity, after_tag='foo', priority=2,
tag='a').getId()
self.commit()
uid1, = [x.uid for x in activity_tool.getMessageList()]
activity_tool.activate(activity='SQLDict', after_tag='bar', priority=1,
uid1, = [x.uid for x in self.getMessageList(activity)]
activity_tool.activate(activity=activity, after_tag='bar', priority=1,
tag='a').getId()
self.commit()
uid2, = [x.uid for x in activity_tool.getMessageList() if x.uid != uid1]
uid2, = [x.uid for x in self.getMessageList(activity) if x.uid != uid1]
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.distribute()
# After distribute, duplicate is still present.
self.assertItemsEqual([uid1, uid2], [x.uid for x in activity_tool.getMessageList()])
self.assertItemsEqual([uid1, uid2],
[x.uid for x in self.getMessageList(activity)])
activity_tool.tic()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_103_3_CheckSQLJoblibDoesNotDeleteDuplicatesBeforeExecution(self):
"""
(see test_103_2_CheckSQLDictDoesNotDeleteDuplicatesBeforeExecution)
"""
activity_tool = self.getActivityTool()
# Adds two same activities.
activity_tool.activate(activity='SQLJoblib', after_tag='foo', priority=2,
tag='a').getId()
self.commit()
uid1, = [x.uid for x in activity_tool.getMessageList()]
activity_tool.activate(activity='SQLJoblib', after_tag='bar', priority=1,
tag='a').getId()
self.commit()
uid2, = [x.uid for x in activity_tool.getMessageList() if x.uid != uid1]
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.distribute()
# After distribute, duplicate is still present.
self.assertItemsEqual([uid1, uid2], [x.uid for x in activity_tool.getMessageList()])
activity_tool.tic()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_103_4_CheckSQLDictDistributeWithSerializationTagAndGroupMethodId(
self):
def testCheckSQLDictDistributeWithSerializationTagAndGroupMethodId(self):
"""
    Distribution was at some point buggy with this scenario when there was
activate with the same serialization_tag and one time with a group_method
......@@ -1631,7 +1309,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# After distribute, there is no deletion because it is different method
self.assertEqual(len(activity_tool.getMessageList()), 2)
self.tic()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_104_interQueuePriorities(self):
"""
......@@ -1680,7 +1357,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
del Organisation.mustRunBefore
del Organisation.mustRunAfter
def CheckActivityRuntimeEnvironment(self, activity):
@for_each_activity
def testCheckActivityRuntimeEnvironment(self, activity):
document = self.portal.organisation_module
activity_result = []
def extractActivityRuntimeEnvironment(self):
......@@ -1708,21 +1386,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
del document.__class__.doSomething
def test_105_activityRuntimeEnvironmentSQLDict(self):
self.CheckActivityRuntimeEnvironment('SQLDict')
def test_106_activityRuntimeEnvironmentSQLQueue(self):
self.CheckActivityRuntimeEnvironment('SQLQueue')
def test_107_activityRuntimeEnvironmentSQLJoblib(self):
self.CheckActivityRuntimeEnvironment('SQLJoblib')
def CheckSerializationTag(self, activity):
@for_each_activity
def testSerializationTag(self, activity):
organisation = self.portal.organisation_module.newContent(portal_type='Organisation')
self.tic()
activity_tool = self.getActivityTool()
result = activity_tool.getMessageList()
self.assertEqual(len(result), 0)
# First scenario: activate, distribute, activate, distribute
# Create first activity and distribute: it must be distributed
organisation.activate(activity=activity, serialization_tag='1').getTitle()
......@@ -1741,8 +1409,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
result = activity_tool.getMessageList()
self.assertEqual(len([x for x in result if x.processing_node == 0]), 1) # Distributed message list len is still 1
self.tic()
result = activity_tool.getMessageList()
self.assertEqual(len(result), 0)
# Second scenario: activate, activate, distribute
# Both messages must be distributed (this is different from regular tags)
organisation.activate(activity=activity, serialization_tag='1', priority=2).getTitle()
......@@ -1760,19 +1426,10 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
message, = [x for x in result if x.processing_node == -1]
self.assertEqual(message.method_id, 'getTitle')
self.tic()
result = activity_tool.getMessageList()
self.assertEqual(len(result), 0)
# Check that giving a None value to serialization_tag does not confuse
# CMFActivity
organisation.activate(activity=activity, serialization_tag=None).getTitle()
self.tic()
self.assertEqual(len(activity_tool.getMessageList()), 0)
def test_108_checkSerializationTagSQLDict(self):
self.CheckSerializationTag('SQLDict')
def test_109_checkSerializationTagSQLQueue(self):
self.CheckSerializationTag('SQLQueue')
def test_110_testAbsoluteUrl(self):
# Tests that absolute_url works in activities. The URL generation is based
......@@ -1868,7 +1525,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
def first(context):
context.changeSkin(skin_selection_name)
if getattr(context, script_id, None) is not None:
raise Exception, '%s is not supposed to be found here.' % (script_id, )
raise Exception('%s is not supposed to be found here.' % script_id)
def second(context):
# If the wrong skin is selected this will raise.
getattr(context, script_id)
......@@ -1885,7 +1542,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Forcibly restore skin selection, otherwise getMessageList would only
# emit a log when retrieving the ZSQLMethod.
portal.changeSkin(None)
self.assertEqual(len(activity_tool.getMessageList()), 0)
finally:
del Organisation.firstTest
del Organisation.secondTest
......@@ -1919,7 +1575,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
rendez_vous_event.set()
# When this event is available, it means test has called process_shutdown.
activity_event.wait()
from Products.CMFActivity.Activity.SQLDict import SQLDict
original_dequeue = SQLDict.dequeueMessage
queue_tic_test_dict = {}
def dequeueMessage(self, activity_tool, processing_node):
......@@ -1983,7 +1638,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(len(activity_tool.getMessageList()), 1)
finally:
# Put activity tool back in a working state
from Products.CMFActivity.ActivityTool import cancelProcessShutdown
try:
cancelProcessShutdown()
except StandardException:
......@@ -1994,6 +1648,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
del Organisation.waitingActivity
SQLDict.dequeueMessage = original_dequeue
self.tic()
def test_hasActivity(self):
active_object = self.portal.organisation_module.newContent(
......@@ -2005,7 +1660,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertFalse(active_process.hasActivity())
def test(obj, **kw):
for activity in ('SQLDict', 'SQLQueue', 'SQLJoblib'):
for activity in ActivityTool.activity_dict:
active_object.activate(activity=activity, **kw).getTitle()
self.commit()
self.assertTrue(obj.hasActivity(), activity)
......@@ -2016,7 +1671,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
test(active_process, active_process=active_process)
test(active_process, active_process=active_process.getPath())
def _test_hasErrorActivity_error(self, activity):
@for_each_activity
def test_hasErrorActivity_error(self, activity):
# Monkey patch Organisation to add a failing method
def failingMethod(self):
raise ValueError('This method always fail')
......@@ -2046,17 +1702,11 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# assert that an error has been seen
self.assertTrue(active_object.hasErrorActivity())
self.assertTrue(active_process.hasErrorActivity())
message, = self.getMessageList(activity)
self.deleteMessageList(activity, [message])
def test_hasErrorActivity_error_SQLQueue(self):
self._test_hasErrorActivity_error('SQLQueue')
def test_hasErrorActivity_error_SQLDict(self):
self._test_hasErrorActivity_error('SQLDict')
def test_hasErrorActivity_error_SQLJoblib(self):
self._test_hasErrorActivity_error('SQLJoblib')
def _test_hasErrorActivity(self, activity):
@for_each_activity
def test_hasErrorActivity(self, activity):
active_object = self.portal.organisation_module.newContent(
portal_type='Organisation')
active_process = self.portal.portal_activities.newActiveProcess()
......@@ -2082,15 +1732,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertFalse(active_object.hasErrorActivity())
self.assertFalse(active_process.hasErrorActivity())
def test_hasErrorActivity_SQLQueue(self):
self._test_hasErrorActivity('SQLQueue')
def test_hasErrorActivity_SQLDict(self):
self._test_hasErrorActivity('SQLDict')
def test_hasErrorActivity_SQLJoblib(self):
self._test_hasErrorActivity('SQLJoblib')
def test_active_object_hasActivity_does_not_catch_exceptions(self):
"""
Some time ago, hasActivity was doing a silent try/except, and this was
......@@ -2120,30 +1761,67 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
finally:
DB.query = DB.original_query
del DB.original_query
self.tic()
def test_MAX_MESSAGE_LIST_SIZE(self):
from Products.CMFActivity.Activity import SQLBase
MAX_MESSAGE_LIST_SIZE = SQLBase.MAX_MESSAGE_LIST_SIZE
def test_insert_max_payload(self):
activity_tool = self.portal.portal_activities
# XXX: For unknown reasons, this test runs faster after the tables are
# recreated. We could also make this test run before all others.
activity_tool.manageClearActivities()
self.commit()
max_allowed_packet = activity_tool.getSQLConnection().getMaxAllowedPacket()
insert_list = []
invoke_list = []
N = 100
class Skip(Exception):
"""
Speed up test by not interrupting the first transaction
as soon as we have the information we want.
"""
original_query = DB.query.__func__
def query(self, query_string, *args, **kw):
if query_string.startswith('INSERT'):
insert_list.append(len(query_string))
if not n:
raise Skip
return original_query(self, query_string, *args, **kw)
def check():
for i in xrange(1, N):
activity_tool.activate(activity=activity, group_id=str(i)
).doSomething(arg)
activity_tool.activate(activity=activity, group_id='~'
).doSomething(' ' * n)
self.tic()
self.assertEqual(len(invoke_list), N)
invoke_list.remove(n)
self.assertEqual(set(invoke_list), {len(arg)})
del invoke_list[:]
activity_tool.__class__.doSomething = \
lambda self, arg: invoke_list.append(len(arg))
try:
SQLBase.MAX_MESSAGE_LIST_SIZE = 3
def dummy_counter(o):
self.__call_count += 1
o = self.portal.organisation_module.newContent(portal_type='Organisation')
for activity in "SQLDict", "SQLQueue", "SQLJoblib":
self.__call_count = 0
try:
for i in xrange(10):
method_name = 'dummy_counter_%s' % i
getattr(o.activate(activity=activity), method_name)()
setattr(Organisation, method_name, dummy_counter)
self.flushAllActivities()
finally:
for i in xrange(10):
delattr(Organisation, 'dummy_counter_%s' % i)
self.assertEqual(self.__call_count, 10)
DB.query = query
for activity in ActivityTool.activity_dict:
arg = ' ' * (max_allowed_packet // N)
# Find the size of the last message argument, such that all messages
# are inserted in a single query whose size is to the maximum allowed.
n = 0
self.assertRaises(Skip, check)
self.abort()
n = max_allowed_packet - insert_list.pop()
self.assertFalse(insert_list)
# Now check with the biggest insert query possible.
check()
self.assertEqual(max_allowed_packet, insert_list.pop())
self.assertFalse(insert_list)
# And check that the insert query is split
# in order not to exceed max_allowed_packet.
n += 1
check()
self.assertEqual(len(insert_list), 2)
del insert_list[:]
finally:
SQLBase.MAX_MESSAGE_LIST_SIZE = MAX_MESSAGE_LIST_SIZE
del activity_tool.__class__.doSomething
DB.query = original_query
def test_115_TestSerializationTagSQLDictPreventsParallelExecution(self):
"""
......@@ -2151,11 +1829,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
then serialization tag guarantees that only one of the same serialization
tagged activities can be processed at the same time.
"""
from Products.CMFActivity import ActivityTool
portal = self.portal
activity_tool = portal.portal_activities
self.tic()
# Add 6 activities
portal.organisation_module.activate(activity='SQLDict', tag='', serialization_tag='test_115').getId()
......@@ -2175,7 +1850,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_tool.distribute()
self.commit()
from Products.CMFActivity import ActivityTool
activity = ActivityTool.activity_dict['SQLDict']
activity.getProcessableMessageList(activity_tool, 1)
self.commit()
......@@ -2184,7 +1858,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity.getProcessableMessageList(activity_tool, 3)
self.commit()
result = activity._getMessageList(activity_tool)
result = activity._getMessageList(activity_tool.getSQLConnection())
try:
self.assertEqual(len([message
for message in result
......@@ -2205,21 +1879,19 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
1)
finally:
# Clear activities from all nodes
activity_tool.SQLBase_delMessage(table=SQLDict.sql_table,
uid=[message.uid for message in result])
self.commit()
self.deleteMessageList('SQLDict', result)
def test_116_RaiseInCommitBeforeMessageExecution(self):
"""
Test behaviour of CMFActivity when the commit just before message
execution fails. In particular, CMFActivity should restart the
activities it selected (processing=1) instead of ignoring them forever.
execution fails. In particular, it should restart the messages it
selected (processing_node=current_node) instead of ignoring them forever.
"""
processed = []
activity_tool = self.portal.portal_activities
activity_tool.__class__.doSomething = processed.append
try:
for activity in 'SQLDict', 'SQLQueue', 'SQLJoblib':
for activity in ActivityTool.activity_dict:
activity_tool.activate(activity=activity).doSomething(activity)
self.commit()
# Make first commit in dequeueMessage raise
......@@ -2228,7 +1900,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# Normally, the request stops here and Zope aborts the transaction
self.abort()
self.assertEqual(processed, [])
# Activity is already in 'processing=1' state. Check tic reselects it.
# Activity is already reserved for current node. Check tic reselects it.
activity_tool.tic()
self.assertEqual(processed, [activity])
del processed[:]
......@@ -2273,14 +1945,13 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
# .. now no messages with this tag should apper
self.assertEqual(0, portal.portal_activities.countMessageWithTag(tag))
def TryNotificationSavedOnEventLogWhenNotifyUserRaises(self, activity):
activity_tool = self.getActivityTool()
self.tic()
@for_each_activity
def testTryNotificationSavedOnEventLogWhenNotifyUserRaises(self, activity):
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
self.tic()
original_notifyUser = Message.notifyUser.im_func
def failSendingEmail(self, *args, **kw):
raise MailHostError, 'Mail is not sent'
raise MailHostError('Mail is not sent')
activity_unit_test_error = Exception()
def failingMethod(self):
raise activity_unit_test_error
......@@ -2291,41 +1962,23 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
obj.activate(activity=activity, priority=6).failingMethod()
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
message, = activity_tool.getMessageList()
message, = self.getMessageList(activity)
self.commit()
for log_record in self.logged:
if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':
type, value, trace = log_record.exc_info
self.commit()
self.assertIs(activity_unit_test_error, value)
self.deleteMessageList(activity, [message])
finally:
Message.notifyUser = original_notifyUser
del Organisation.failingMethod
self._ignore_log_errors()
def test_118_userNotificationSavedOnEventLogWhenNotifyUserRaisesWithSQLDict(self):
"""
Check the error is saved on event log even if the mail notification is not sent.
"""
self.TryNotificationSavedOnEventLogWhenNotifyUserRaises('SQLDict')
def test_119_userNotificationSavedOnEventLogWhenNotifyUserRaisesWithSQLQueue(self):
"""
Check the error is saved on event log even if the mail notification is not sent.
"""
self.TryNotificationSavedOnEventLogWhenNotifyUserRaises('SQLQueue')
def test_120_userNotificationSavedOnEventLogWhenNotifyUserRaisesWithSQLJoblib(self):
"""
Check the error is saved on event log even if the mail notification is not sent.
"""
self.TryNotificationSavedOnEventLogWhenNotifyUserRaises('SQLJoblib')
def TryUserMessageContainingNoTracebackIsStillSent(self, activity):
activity_tool = self.getActivityTool()
@for_each_activity
def testTryUserMessageContainingNoTracebackIsStillSent(self, activity):
# With Message.__call__
# 1: activity context does not exist when activity is executed
self.tic()
obj = self.portal.organisation_module.newContent(portal_type='Organisation')
self.tic()
notification_done = []
......@@ -2334,40 +1987,25 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.traceback = None
original_notifyUser = Message.notifyUser
def failingMethod(self):
raise ValueError, "This method always fail"
raise ValueError("This method always fail")
Message.notifyUser = fake_notifyUser
Organisation.failingMethod = failingMethod
try:
obj.activate(activity=activity).failingMethod()
self.commit()
self.flushAllActivities(silent=1, loop_size=100)
message_list = activity_tool.getMessageList()
self.assertEqual(len(message_list), 1)
message, = self.getMessageList(activity)
self.assertEqual(len(notification_done), 1)
message = message_list[0]
self.assertEqual(message.traceback, None)
message(activity_tool)
activity_tool.manageCancel(message.object_path, message.method_id)
self.commit()
message(self.getActivityTool())
self.deleteMessageList(activity, [message])
finally:
Message.notifyUser = original_notifyUser
del Organisation.failingMethod
def test_121_sendMessageWithNoTracebackWithSQLQueue(self):
self.TryUserMessageContainingNoTracebackIsStillSent('SQLQueue')
def test_122_sendMessageWithNoTracebackWithSQLDict(self):
self.TryUserMessageContainingNoTracebackIsStillSent('SQLDict')
def test_123_sendMessageWithNoTracebackWithSQLJoblib(self):
"""
    Check that message with no traceback is still sent
"""
self.TryUserMessageContainingNoTracebackIsStillSent('SQLJoblib')
def TryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises(self, activity):
@for_each_activity
def testTryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises(self, activity):
# Make sure that no active object is installed.
activity_tool = self.portal.portal_activities
o = self.getOrganisation()
class ActivityUnitTestError(Exception):
pass
......@@ -2386,7 +2024,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self._catch_log_errors()
o.activate(activity = activity).failingMethod()
self.commit()
self.assertEqual(len(activity_tool.getMessageList()), 1)
message, = self.getMessageList(activity)
self.flushAllActivities(silent = 1)
SiteErrorLog.raising = original_raising
self.commit()
......@@ -2394,64 +2032,12 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':
type, value, trace = log_record.exc_info
self.assertIs(activity_unit_test_error, value)
self.deleteMessageList(activity, [message])
finally:
SiteErrorLog.raising = original_raising
del Organisation.failingMethod
self._ignore_log_errors()
def test_124_userNotificationSavedOnEventLogWhenSiteErrorLoggerRaisesWithSQLJoblib(self):
"""
Check that message not saved in site error logger is not lost
"""
self.TryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises('SQLJoblib')
def test_125_userNotificationSavedOnEventLogWhenSiteErrorLoggerRaisesWithSQLDict(self):
"""
Check that message not saved in site error logger is not lost'
"""
self.TryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises('SQLDict')
def test_125_userNotificationSavedOnEventLogWhenSiteErrorLoggerRaisesWithSQLJoblib(self):
"""
Check that message not saved in site error logger is not lost'
"""
self.TryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises('SQLJoblib')
def test_126_userNotificationSavedOnEventLogWhenSiteErrorLoggerRaisesWithSQLQueue(self):
self.TryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises('SQLQueue')
def test_127_checkConflictErrorAndNoRemainingActivities(self):
"""
When an activity creates several activities, make sure that all newly
created activities are not commited if there is ZODB Conflict error
"""
from Products.CMFActivity.Activity import SQLBase
MAX_MESSAGE_LIST_SIZE = SQLBase.MAX_MESSAGE_LIST_SIZE
try:
SQLBase.MAX_MESSAGE_LIST_SIZE = 1
activity_tool = self.portal.portal_activities
def doSomething(self):
self.serialize()
self.activate(activity='SQLQueue').getId()
self.activate(activity='SQLQueue').getTitle()
conn = self._p_jar
tid = self._p_serial
oid = self._p_oid
try:
conn.db().invalidate({oid: tid})
except TypeError:
conn.db().invalidate(tid, {oid: tid})
activity_tool.__class__.doSomething = doSomething
activity_tool.activate(activity='SQLQueue').doSomething()
self.commit()
activity_tool.tic()
message_list = activity_tool.getMessageList()
self.assertEqual(['doSomething'],[x.method_id for x in message_list])
activity_tool.manageClearActivities()
finally:
SQLBase.MAX_MESSAGE_LIST_SIZE = MAX_MESSAGE_LIST_SIZE
def test_128_CheckDistributeWithSerializationTagAndGroupMethodId(self):
activity_tool = self.portal.portal_activities
obj1 = activity_tool.newActiveProcess()
......@@ -2466,7 +2052,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
group_method_call_list.append(r)
activity_tool.__class__.doSomething = doSomething
try:
for activity in 'SQLDict', 'SQLQueue', 'SQLJoblib':
for activity in ActivityTool.activity_dict:
activity_kw = dict(activity=activity, serialization_tag=self.id(),
group_method_id='portal_activities/doSomething')
obj1.activate(**activity_kw).dummy(1, x=None)
......@@ -2488,11 +2074,8 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.assertEqual(len(activity_tool.getMessageList()), 2)
activity_tool.tic()
self.assertEqual(group_method_call_list.pop(),
dict(SQLDict=[message2],
SQLQueue=[message1, message2],
SQLJoblib=[message2])[activity])
[message2] if activity != 'SQLQueue' else [message1, message2])
self.assertFalse(group_method_call_list)
self.assertFalse(activity_tool.getMessageList())
finally:
del activity_tool.__class__.doSomething
......@@ -2600,7 +2183,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
transaction.get().addBeforeCommitHook(_raise, (error,))
obj.__class__.doSomething = doSomething
try:
for activity in 'SQLDict', 'SQLQueue', 'SQLJoblib':
for activity in ActivityTool.activity_dict:
for conflict_error in False, True:
weakref_list = []
obj.activity_count = obj.on_error_count = 0
......@@ -2731,7 +2314,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
kw = {}
self._catch_log_errors(subsystem='CMFActivity')
try:
for kw['activity'] in 'SQLDict', 'SQLQueue', 'SQLJoblib':
for kw['activity'] in ActivityTool.activity_dict:
for kw['group_method_id'] in '', None:
obj = activity_tool.newActiveProcess()
self.tic()
......@@ -2818,7 +2401,6 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
self.commit()
activity_tool.timeShift(VALIDATION_ERROR_DELAY)
activity_tool.tic()
self.assertFalse(activity_tool.getMessageList())
finally:
del obj.__class__.doSomething
......@@ -2845,7 +2427,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
skin.manage_delObjects([script_id])
self.tic()
def testGetCurrentNode(self):
def test_getCurrentNode(self):
current_node = getattr(getConfiguration(), 'product_config', {}) \
.get('cmfactivity', {}).get('node-id')
if not current_node:
......@@ -2855,7 +2437,7 @@ class TestCMFActivity(ERP5TypeTestCase, LogInterceptor):
activity_node = self.portal.portal_activities.getCurrentNode()
self.assertEqual(activity_node, current_node)
def testGetServerAddress(self):
def test_getServerAddress(self):
ip = port = ''
for k, v in socket_map.items():
if hasattr(v, 'addr'):
......
......@@ -2211,11 +2211,8 @@ class ERP5Generator(PortalGenerator):
createDirectoryView(ps, reg_key)
def setupDefaultSkins(self, p):
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.CMFActivity import cmfactivity_globals
ps = p.portal_skins
self.addCMFDefaultDirectoryViews(p)
addDirectoryViews(ps, 'skins', cmfactivity_globals)
ps.manage_addProduct['OFSP'].manage_addFolder(id='external_method')
ps.manage_addProduct['OFSP'].manage_addFolder(id='custom')
# Set the 'custom' layer a high priority, so it remains the first
......@@ -2223,7 +2220,6 @@ class ERP5Generator(PortalGenerator):
ps['custom'].manage_addProperty("business_template_skin_layer_priority", 100.0, "float")
skin_folder_list = [ 'custom'
, 'external_method'
, 'activity'
] + self.CMFDEFAULT_FOLDER_LIST
skin_folders = ', '.join(skin_folder_list)
ps.addSkinSelection( 'View'
......
return 'ActivityTool_manageDelete?uid=%s&activity=%s' % (context.uid, context.activity)
return 'manageDelete?message_uid_list:int:list=%s&activity=%s' % (context.uid, context.activity)
SELECT count(*) AS message, method_id, processing, processing_node AS node, min(priority) AS min_pri, max(priority) AS max_pri FROM <dtml-var table> GROUP BY method_id, processing, processing_node ORDER BY node
\ No newline at end of file
SELECT count(*) AS `count`, method_id, processing_node AS node, min(priority) AS min_pri, max(priority) AS max_pri FROM <dtml-var table> GROUP BY processing_node, method_id ORDER BY processing_node, method_id
\ No newline at end of file
......@@ -133,6 +133,14 @@
<key> <string>id</string> </key>
<value> <string>ActivityTool_getCurrentActivities</string> </value>
</item>
<item>
<key> <string>max_cache_</string> </key>
<value> <int>0</int> </value>
</item>
<item>
<key> <string>max_rows_</string> </key>
<value> <int>0</int> </value>
</item>
<item>
<key> <string>title</string> </key>
<value> <string></string> </value>
......
# searching
# processing_node column is manage by methods called by getMessageTempObjectList
if kw.get('processing_node', None) == '':
del kw['processing_node']
for k, v in kw.items():
if v:
if k == "str_object_path":
kw["path"] = v
elif k == "uid_activity":
kw["uid"] = v
elif k in ('method_id', 'processing_node', 'retry'):
continue
del kw[k]
message_kw = dict([(k,kw[k]) for k in ['uid_activity','str_object_path','method_id',
'args','retry','processing_node',
'processing'] if not(kw.get(k) in ('',None))])
if message_kw.has_key("str_object_path"):
message_kw["path"] = message_kw.pop("str_object_path")
if message_kw.has_key("uid_activity"):
message_kw["uid"] = message_kw.pop("uid_activity")
message_list = context.getMessageTempObjectList(**kw)
for message in message_list:
message.edit(
str_object_path = '/'.join(message.object_path),
uid_activity = str(message.uid) + ' ('+ message.activity[3:] +')',
arguments = str(message.args),
delete = '[Delete]',
restart = '[Restart]',
)
message_list = context.getMessageTempObjectList(**message_kw)
message_list_to_show = []
while len(message_list) > 0:
message = message_list.pop(0)
message.edit(str_object_path = '/'.join(str(i) for i in message.object_path))
message.edit(uid_activity = str(message.uid) + ' ('+ message.activity[3:] +')')
message.edit(arguments = str(message.args))
message.edit(delete = '[Delete]')
message.edit(restart = '[Restart]')
message_list_to_show.append(message)
return message_list_to_show
return message_list
SELECT priority as pri, MIN(timediff(NOW(), date)) AS min, AVG(timediff(NOW() , date)) AS avg, MAX(timediff(NOW() , date)) AS max FROM <dtml-var table> GROUP BY priority;
\ No newline at end of file
SELECT priority AS pri,
TIME_FORMAT(TIMEDIFF(UTC_TIMESTAMP(6), MAX(date)), '%T') AS min,
TIME_FORMAT(TIMEDIFF(UTC_TIMESTAMP(6), AVG(date)), '%T') AS avg,
TIME_FORMAT(TIMEDIFF(UTC_TIMESTAMP(6), MIN(date)), '%T') AS max
FROM <dtml-var table> GROUP BY priority
\ No newline at end of file
......@@ -18,6 +18,14 @@
<key> <string>id</string> </key>
<value> <string>ActivityTool_getSQLActivities</string> </value>
</item>
<item>
<key> <string>max_cache_</string> </key>
<value> <int>0</int> </value>
</item>
<item>
<key> <string>max_rows_</string> </key>
<value> <int>0</int> </value>
</item>
<item>
<key> <string>title</string> </key>
<value> <string></string> </value>
......
data = {}
for d, sql in [('SQLDict',context.ActivityTool_getCurrentActivities(table='message')),
('SQLQueue',context.ActivityTool_getCurrentActivities(table='message_queue'))]:
data[d] = {'line_list':[]}
for line in sql:
tmp = {}
for k in ['message','method_id','processing','node','min_pri','max_pri']:
tmp[k] = line[k]
data[d]['line_list'].append(tmp)
for d, sql in [('SQLDict2',context.ActivityTool_getSQLActivities(table='message')),
('SQLQueue2',context.ActivityTool_getSQLActivities(table='message_queue'))]:
data[d] = {'line_list':[]}
for line in sql:
tmp = {'pri':line['pri']}
for k in ['min','avg','max']:
tmp[k] = str(line[k])
data[d]['line_list'].append(tmp)
import json
return json.dumps(data)
return json.dumps({
q + ('2' if i else ''): {
'line_list': [dict(zip(results.names(), row)) for row in results]
}
for i, q in enumerate((context.ActivityTool_getCurrentActivities,
context.ActivityTool_getSQLActivities))
for q, results in (('SQLDict', q(table='message')),
('SQLQueue', q(table='message_queue')))
})
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>Script_magic</string> </key>
<value> <int>3</int> </value>
</item>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
</klass>
<tuple/>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>uid,activity,**kw</string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>ActivityTool_manageDelete</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
<?xml version="1.0"?>
<ZopeData>
<record id="1" aka="AAAAAAAAAAE=">
<pickle>
<global name="PythonScript" module="Products.PythonScripts.PythonScript"/>
</pickle>
<pickle>
<dictionary>
<item>
<key> <string>Script_magic</string> </key>
<value> <int>3</int> </value>
</item>
<item>
<key> <string>_bind_names</string> </key>
<value>
<object>
<klass>
<global name="NameAssignments" module="Shared.DC.Scripts.Bindings"/>
</klass>
<tuple/>
<state>
<dictionary>
<item>
<key> <string>_asgns</string> </key>
<value>
<dictionary>
<item>
<key> <string>name_container</string> </key>
<value> <string>container</string> </value>
</item>
<item>
<key> <string>name_context</string> </key>
<value> <string>context</string> </value>
</item>
<item>
<key> <string>name_m_self</string> </key>
<value> <string>script</string> </value>
</item>
<item>
<key> <string>name_subpath</string> </key>
<value> <string>traverse_subpath</string> </value>
</item>
</dictionary>
</value>
</item>
</dictionary>
</state>
</object>
</value>
</item>
<item>
<key> <string>_params</string> </key>
<value> <string>uid,activity,**kw</string> </value>
</item>
<item>
<key> <string>id</string> </key>
<value> <string>ActivityTool_manageRestart</string> </value>
</item>
</dictionary>
</pickle>
</record>
</ZopeData>
return 'ActivityTool_manageRestart?uid=%s&activity=%s' % (context.uid, context.activity)
return 'manageRestart?message_uid_list:int:list=%s&activity=%s' % (context.uid, context.activity)
......@@ -142,10 +142,6 @@
<string>retry</string>
<string>Retry</string>
</tuple>
<tuple>
<string>processing</string>
<string>Processing</string>
</tuple>
</list>
</value>
</item>
......@@ -221,10 +217,6 @@
<string>retry</string>
<string>Retry</string>
</tuple>
<tuple>
<string>processing</string>
<string>Processing</string>
</tuple>
</list>
</value>
</item>
......@@ -301,10 +293,6 @@
<string>retry</string>
<string></string>
</tuple>
<tuple>
<string>processing</string>
<string></string>
</tuple>
</list>
</value>
</item>
......
......@@ -10,7 +10,7 @@
<table>
<tr>
<th>Type</th>
<th>Message</th>
<th>Count</th>
<th>Method Id</th>
<th>Processing Node</th>
<th>Min pri</th>
......@@ -19,7 +19,7 @@
{{#each messageList1}}
<tr>
<td>{{this.messagetype}} </td>
<td>{{this.message}}</td>
<td>{{this.count}}</td>
<td>{{this.method_id}}</td>
<td>{{this.node}}</td>
<td>{{this.min_pri}}</td>
......@@ -29,7 +29,7 @@
{{#each messageList2}}
<tr>
<td>{{this.messagetype}} </td>
<td>{{this.message}}</td>
<td>{{this.count}}</td>
<td>{{this.method_id}}</td>
<td>{{this.node}}</td>
<td>{{this.min_pri}}</td>
......@@ -40,7 +40,7 @@
<table>
<tr>
<th>Type</th>
<th>Pri</th>
<th>Priority</th>
<th>Min</th>
<th>Avg</th>
<th>Max</th>
......
......@@ -482,6 +482,10 @@ class DB(TM):
if m[0] not in hosed_connection:
raise
def getMaxAllowedPacket(self):
# minus 2-bytes overhead from mysql library
return self._query("SELECT @@max_allowed_packet-2").fetch_row()[0][0]
@contextmanager
def lock(self):
"""Lock for the connected DB"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment