Commit f31c9ab4 authored by Arnaud Fontaine

Do not treat an error on the first iteration as a special case.

Previously, if the first iteration failed, the whole execution failed even when
the maximum number of errors given on the command line had not been reached.
This was a problem especially when running with a range of users.

Results are now adjusted dynamically after the first successful iteration of
each suite, rather than being fixed by the first iteration.
parent b1712e6c
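
In short: a suite's expected result count ('expected', initialised to -1) is
only recorded once one of its iterations succeeds, and any incomplete result
list, earlier or later, is padded with -1 error markers. A minimal standalone
sketch of that padding idea; pad_with_errors and the timing values are
illustrative only, the commit's actual helper is _addResultWithError() in the
diff below:

# Sketch of the dynamic adjustment introduced by this commit: the
# expected result count of a suite stays unknown (-1) until one of
# its iterations succeeds, then every short result list is padded.
def pad_with_errors(result_list, expected_len):
  # Missing measurements are recorded as -1 instead of aborting
  # the whole run.
  missing = expected_len - len(result_list)
  if missing > 0:
    result_list.extend(missing * [-1])

suite = {'expected': -1, 'all_result_list': []}

# First iteration errors out after one measurement: nothing is known
# yet about how many results the suite should yield, so keep it as-is.
suite['all_result_list'].append([1.2])

# Second iteration succeeds with three measurements: record the
# expected count and fix the earlier, incomplete iteration.
complete = [1.1, 0.9, 1.4]
suite['expected'] = len(complete)
for result_list in suite['all_result_list']:
  pad_with_errors(result_list, suite['expected'])
suite['all_result_list'].append(complete)

print(suite['all_result_list'])  # [[1.2, -1, -1], [1.1, 0.9, 1.4]]
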
@@ -37,6 +37,7 @@ import datetime
 import socket
 
 from ..testbrowser.browser import Browser
+from .result import NothingFlushedException
 
 RESULT_NUMBER_BEFORE_FLUSHING = 100
@@ -84,6 +85,7 @@ class BenchmarkProcess(multiprocessing.Process):
     for target_idx, target in enumerate(self._argument_namespace.benchmark_suite_list):
       self._logger.debug("EXECUTE: %s" % target)
       result.enterSuite(target.__name__)
 
+      with_error = False
       try:
         self._browser.open()
@@ -100,11 +102,11 @@ class BenchmarkProcess(multiprocessing.Process):
           pass
 
         self._error_counter += 1
-        if (self._current_repeat == 1 or
-            self._error_counter >= self._argument_namespace.max_error_number):
+        if self._error_counter >= self._argument_namespace.max_error_number:
           raise RuntimeError(msg)
 
         self._logger.warning(msg)
+        with_error = True
 
       else:
         for stat in result.getCurrentSuiteStatList():
@@ -122,7 +124,7 @@ class BenchmarkProcess(multiprocessing.Process):
             raise RuntimeError("Stopping as mean is greater than maximum "
                                "global average")
 
-      result.exitSuite()
+      result.exitSuite(with_error)
 
     result.iterationFinished()
@@ -167,7 +169,10 @@ class BenchmarkProcess(multiprocessing.Process):
       self.runBenchmarkSuiteList(result)
 
       if not self._current_repeat % RESULT_NUMBER_BEFORE_FLUSHING:
-        result.flush()
+        try:
+          result.flush()
+        except NothingFlushedException:
+          pass
 
       self._current_repeat += 1
@@ -44,7 +44,6 @@ class BenchmarkResultStatistic(object):
     self.minimum = sys.maxint
     self.maximum = -1
     self.n = 0
-    self.error_sum = 0
 
     # For calculating the mean
     self._value_sum = 0
@@ -53,9 +52,6 @@ class BenchmarkResultStatistic(object):
     self._variance_sum = 0
     self._mean = 0
 
-  def add_error(self):
-    self.error_sum += 1
-
   def add(self, value):
     if value < self.minimum:
       self.minimum = value
@@ -77,6 +73,9 @@ class BenchmarkResultStatistic(object):
   def standard_deviation(self):
     return math.sqrt(self._variance_sum / self.n)
 
+class NothingFlushedException(Exception):
+  pass
+
 import abc
 
 class BenchmarkResult(object):
@@ -86,16 +85,14 @@ class BenchmarkResult(object):
     self._argument_namespace = argument_namespace
     self._nb_users = nb_users
     self._user_index = user_index
-    self._stat_list = []
-    self._suite_idx = 0
-    self._result_idx = 0
-    self.result_list = []
-    self._all_result_list = []
-    self._first_iteration = True
-    self._current_suite_name = None
-    self._result_idx_checkpoint_list = []
-    self.label_list = []
 
     self._logger = None
+    self._label_list = None
+    self._all_suite_list = []
+    self._current_suite_index = 0
+    self._all_result_list = []
+    self._current_suite_dict = None
+    self._current_result_list = None
 
   @property
   def logger(self):
@@ -113,62 +110,106 @@ class BenchmarkResult(object):
     return self
 
   def enterSuite(self, name):
-    self._current_suite_name = name
+    try:
+      self._current_suite_dict = self._all_suite_list[self._current_suite_index]
+    except IndexError:
+      self._current_suite_dict = {'name': name,
+                                  'all_result_list': [],
+                                  'stat_list': [],
+                                  # Number of expected results
+                                  'expected': -1}
+
+      self._all_suite_list.append(self._current_suite_dict)
+
+    self._current_result_list = []
 
   def __call__(self, label, value):
-    self.result_list.append(value)
     try:
-      result_statistic = self._stat_list[self._result_idx]
+      result_statistic = \
+          self._current_suite_dict['stat_list'][len(self._current_result_list)]
     except IndexError:
-      result_statistic = BenchmarkResultStatistic(self._current_suite_name,
-                                                  label)
-      self._stat_list.append(result_statistic)
+      result_statistic = BenchmarkResultStatistic(
+        self._current_suite_dict['name'], label)
+
+      self._current_suite_dict['stat_list'].append(result_statistic)
 
     result_statistic.add(value)
-    self._result_idx += 1
-
-  def getLabelList(self):
-    return [ stat.full_label for stat in self._stat_list ]
-
-  def iterationFinished(self):
-    self._all_result_list.append(self.result_list)
-    if self._first_iteration:
-      self.label_list = self.getLabelList()
-
-    self.logger.debug("RESULTS: %s" % self.result_list)
-    self.result_list = []
-    self._first_iteration = False
-    self._suite_idx = 0
-    self._result_idx = 0
+    self._current_result_list.append(value)
+
+  @property
+  def label_list(self):
+    if self._label_list:
+      return self._label_list
+
+    # TODO: Should perhaps be cached...
+    label_list = []
+    for suite_dict in self._all_suite_list:
+      if suite_dict['expected'] == -1:
+        return None
+
+      label_list.extend([ stat.full_label for stat in suite_dict['stat_list'] ])
+
+    self._label_list = label_list
+    return label_list
 
   def getCurrentSuiteStatList(self):
-    start_index = self._suite_idx and \
-        self._result_idx_checkpoint_list[self._suite_idx - 1] or 0
-
-    return self._stat_list[start_index:self._result_idx]
-
-  def exitSuite(self):
-    if self._first_iteration:
-      self._result_idx_checkpoint_list.append(self._result_idx)
+    return self._current_suite_dict['stat_list']
+
+  @staticmethod
+  def _addResultWithError(result_list, expected_len):
+    missing_result_n = expected_len - len(result_list)
+    if missing_result_n > 0:
+      result_list.extend(missing_result_n * [-1])
+
+  def exitSuite(self, with_error=False):
+    if with_error:
+      if self._current_suite_dict['expected'] != -1:
+        self._addResultWithError(self._current_result_list,
+                                 self._current_suite_dict['expected'])
     else:
-      expected_result_idx = self._result_idx_checkpoint_list[self._suite_idx]
-      while self._result_idx != expected_result_idx:
-        self.result_list.append(0)
-        self._stat_list[self._result_idx].add_error()
-        self._result_idx += 1
-
-    self._suite_idx += 1
+      if self._current_suite_dict['expected'] == -1:
+        self._current_suite_dict['expected'] = len(self._current_result_list)
+
+        # Fix previous results
+        for result_list in self._current_suite_dict['all_result_list']:
+          self._addResultWithError(result_list,
+                                   self._current_suite_dict['expected'])
+
+    self._current_suite_dict['all_result_list'].append(self._current_result_list)
+    self._current_suite_index += 1
+
+  def iterationFinished(self):
+    self._current_suite_index = 0
 
   @abc.abstractmethod
   def flush(self, partial=True):
-    self._all_result_list = []
+    # TODO: Should perhaps be cached...
+    all_result_list = []
+    for result_dict in self._all_suite_list:
+      if result_dict['expected'] == -1:
+        raise NothingFlushedException()
+
+      for index, result_list in enumerate(result_dict['all_result_list']):
+        try:
+          all_result_list[index].extend(result_list)
+        except IndexError:
+          all_result_list.append(result_list)
+
+      result_dict['all_result_list'] = []
+
+    return all_result_list
 
   @abc.abstractmethod
   def __exit__(self, exc_type, exc_value, traceback_object):
     signal.signal(signal.SIGTERM, signal.SIG_IGN)
-    self.flush(partial=False)
+    try:
+      self.flush(partial=False)
+    except NothingFlushedException:
+      pass
 
     return True
 
 
 class CSVBenchmarkResult(BenchmarkResult):
@@ -207,15 +248,15 @@ class CSVBenchmarkResult(BenchmarkResult):
     return self
 
   def flush(self, partial=True):
+    result_list = super(CSVBenchmarkResult, self).flush(partial)
+
     if self._result_file.tell() == 0:
       self._csv_writer.writerow(self.label_list)
 
-    self._csv_writer.writerows(self._all_result_list)
+    self._csv_writer.writerows(result_list)
     self._result_file.flush()
     os.fsync(self._result_file.fileno())
 
-    super(CSVBenchmarkResult, self).flush(partial)
-
   def __exit__(self, exc_type, exc_value, traceback_object):
     super(CSVBenchmarkResult, self).__exit__(exc_type, exc_value,
                                              traceback_object)
@@ -251,6 +292,8 @@ class ERP5BenchmarkResult(BenchmarkResult):
     self.log_file.seek(0)
 
   def flush(self, partial=True):
+    result_list = super(ERP5BenchmarkResult, self).flush()
+
     benchmark_result = xmlrpclib.ServerProxy(
       self._argument_namespace.erp5_publish_url,
       verbose=True,
@@ -261,12 +304,10 @@ class ERP5BenchmarkResult(BenchmarkResult):
       self._argument_namespace.repeat,
       self._nb_users,
       self._argument_namespace.benchmark_suite_name_list,
-      self.getLabelList(),
-      self._all_result_list,
+      self.label_list,
+      result_list,
       self._log_buffer_list)
 
-    super(ERP5BenchmarkResult, self).flush()
-
   def __exit__(self, exc_type, exc_value, traceback_object):
     super(ERP5BenchmarkResult, self).__exit__(exc_type, exc_value,
                                               traceback_object)
@@ -28,14 +28,18 @@
 #
 ##############################################################################
 
-from .result import CSVBenchmarkResult
+from .result import CSVBenchmarkResult, NothingFlushedException
 
 class CSVScalabilityBenchmarkResult(CSVBenchmarkResult):
   def flush(self, partial=True):
-    super(CSVScalabilityBenchmarkResult, self).flush(partial)
-    self._argument_namespace.notify_method(self._result_filename,
-                                           self._result_file.tell(),
-                                           partial=partial)
+    try:
+      super(CSVScalabilityBenchmarkResult, self).flush(partial)
+    except NothingFlushedException:
+      pass
+    else:
+      self._argument_namespace.notify_method(self._result_filename,
+                                             self._result_file.tell(),
+                                             partial=partial)
 
 from .performance_tester import PerformanceTester
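
For result writers, the contract of flush() changes: it now returns the merged
rows instead of draining an internal _all_result_list, and raises
NothingFlushedException while no suite has completed a successful iteration.
A condensed, runnable rendering of the merge performed by the new
BenchmarkResult.flush(); merge_rows and the inline suite dicts are
illustrative stand-ins, not the real API:

# One output row per iteration, concatenating the suites' result
# lists index by index, as in the new BenchmarkResult.flush().
class NothingFlushedException(Exception):
  pass

def merge_rows(all_suite_list):
  all_result_list = []
  for suite_dict in all_suite_list:
    if suite_dict['expected'] == -1:
      # No iteration of this suite has succeeded yet.
      raise NothingFlushedException()
    for index, result_list in enumerate(suite_dict['all_result_list']):
      try:
        all_result_list[index].extend(result_list)
      except IndexError:
        all_result_list.append(list(result_list))
    suite_dict['all_result_list'] = []  # flushed rows are dropped
  return all_result_list

suites = [
  {'expected': 2, 'all_result_list': [[1.0, 2.0], [1.1, 2.1]]},
  {'expected': 1, 'all_result_list': [[3.0], [3.1]]},
]
print(merge_rows(suites))  # [[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]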