Commit f31c9ab4 authored by Arnaud Fontaine

Do not consider an error on the first iteration a specific case.

Before, if the first iteration failed, the whole execution failed even if
the maximum number of errors specified on the command line had not been
reached, which was an issue especially when running with a range of users.

The results are now adjusted dynamically after the first successful
iteration rather than at the first iteration.
parent b1712e6c
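In short: a row from a failed iteration is now padded with -1 placeholders once a successful iteration has established how many results the suite produces, instead of aborting the whole run. A one-function sketch of the idea (pad_with_errors is illustrative; the diff below introduces this logic as _addResultWithError()):

def pad_with_errors(result_list, expected_len):
  # Extend a short (failed) iteration's row with -1 markers so every
  # row ends up with the same number of columns.
  result_list.extend(max(0, expected_len - len(result_list)) * [-1])
  return result_list

# The first iteration died after one measurement; a later successful
# iteration showed that the suite yields 3 results per run.
print(pad_with_errors([0.42], 3))  # [0.42, -1, -1]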
@@ -37,6 +37,7 @@ import datetime
 import socket
 
 from ..testbrowser.browser import Browser
+from .result import NothingFlushedException
 
 RESULT_NUMBER_BEFORE_FLUSHING = 100
@@ -84,6 +85,7 @@ class BenchmarkProcess(multiprocessing.Process):
     for target_idx, target in enumerate(self._argument_namespace.benchmark_suite_list):
       self._logger.debug("EXECUTE: %s" % target)
       result.enterSuite(target.__name__)
+      with_error = False
       try:
         self._browser.open()
@@ -100,11 +102,11 @@ class BenchmarkProcess(multiprocessing.Process):
           pass
 
         self._error_counter += 1
-        if (self._current_repeat == 1 or
-            self._error_counter >= self._argument_namespace.max_error_number):
+        if self._error_counter >= self._argument_namespace.max_error_number:
           raise RuntimeError(msg)
 
         self._logger.warning(msg)
+        with_error = True
       else:
         for stat in result.getCurrentSuiteStatList():
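Condensed, the condition change above: an error during the first iteration (self._current_repeat == 1) used to be unconditionally fatal; now only the accumulated error count is checked against max_error_number. A minimal sketch, with plain arguments standing in for the instance attributes:

def old_abort(current_repeat, error_counter, max_error_number):
  return current_repeat == 1 or error_counter >= max_error_number

def new_abort(error_counter, max_error_number):
  return error_counter >= max_error_number

# One error on the very first iteration, with a budget of 10 errors:
print(old_abort(1, 1, 10))  # True: the whole run used to abort
print(new_abort(1, 10))     # False: the run now continues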
@@ -122,7 +124,7 @@ class BenchmarkProcess(multiprocessing.Process):
             raise RuntimeError("Stopping as mean is greater than maximum "
                                "global average")
 
-      result.exitSuite()
+      result.exitSuite(with_error)
     result.iterationFinished()
@@ -167,7 +169,10 @@ class BenchmarkProcess(multiprocessing.Process):
         self.runBenchmarkSuiteList(result)
         if not self._current_repeat % RESULT_NUMBER_BEFORE_FLUSHING:
-          result.flush()
+          try:
+            result.flush()
+          except NothingFlushedException:
+            pass
 
         self._current_repeat += 1
...
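A periodic flush (every RESULT_NUMBER_BEFORE_FLUSHING iterations) can now fire before any suite has completed successfully; the new NothingFlushedException turns that into a no-op instead of an error. A reduced sketch of the guard, assuming a flush() with the new contract (names illustrative):

class NothingFlushedException(Exception):
  pass

def flush(all_suite_list):
  # Mimics the new contract only: nothing can be written until every
  # suite has at least one successful iteration ('expected' != -1).
  if any(suite['expected'] == -1 for suite in all_suite_list):
    raise NothingFlushedException()
  return [suite['all_result_list'] for suite in all_suite_list]

try:
  flush([{'expected': -1, 'all_result_list': []}])
except NothingFlushedException:
  pass  # nothing flushed yet: keep iterating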
@@ -44,7 +44,6 @@ class BenchmarkResultStatistic(object):
     self.minimum = sys.maxint
     self.maximum = -1
     self.n = 0
-    self.error_sum = 0
 
     # For calculating the mean
     self._value_sum = 0
@@ -53,9 +52,6 @@ class BenchmarkResultStatistic(object):
     self._variance_sum = 0
     self._mean = 0
 
-  def add_error(self):
-    self.error_sum += 1
-
   def add(self, value):
     if value < self.minimum:
       self.minimum = value
@@ -77,6 +73,9 @@ class BenchmarkResultStatistic(object):
   def standard_deviation(self):
     return math.sqrt(self._variance_sum / self.n)
 
+class NothingFlushedException(Exception):
+  pass
+
 import abc
 class BenchmarkResult(object):
@@ -86,16 +85,14 @@ class BenchmarkResult(object):
     self._argument_namespace = argument_namespace
     self._nb_users = nb_users
     self._user_index = user_index
-    self._stat_list = []
-    self._suite_idx = 0
-    self._result_idx = 0
-    self.result_list = []
-    self._all_result_list = []
-    self._first_iteration = True
-    self._current_suite_name = None
-    self._result_idx_checkpoint_list = []
-    self.label_list = []
     self._logger = None
+    self._label_list = None
+    self._all_suite_list = []
+    self._current_suite_index = 0
+    self._all_result_list = []
+    self._current_suite_dict = None
+    self._current_result_list = None
 
   @property
   def logger(self):
@@ -113,62 +110,106 @@ class BenchmarkResult(object):
     return self
 
   def enterSuite(self, name):
-    self._current_suite_name = name
+    try:
+      self._current_suite_dict = self._all_suite_list[self._current_suite_index]
+    except IndexError:
+      self._current_suite_dict = {'name': name,
+                                  'all_result_list': [],
+                                  'stat_list': [],
+                                  # Number of expected results
+                                  'expected': -1}
+      self._all_suite_list.append(self._current_suite_dict)
+    self._current_result_list = []
 
   def __call__(self, label, value):
-    self.result_list.append(value)
     try:
-      result_statistic = self._stat_list[self._result_idx]
+      result_statistic = \
+        self._current_suite_dict['stat_list'][len(self._current_result_list)]
     except IndexError:
-      result_statistic = BenchmarkResultStatistic(self._current_suite_name,
-                                                  label)
-      self._stat_list.append(result_statistic)
+      result_statistic = BenchmarkResultStatistic(
+        self._current_suite_dict['name'], label)
+      self._current_suite_dict['stat_list'].append(result_statistic)
 
     result_statistic.add(value)
-    self._result_idx += 1
+    self._current_result_list.append(value)
 
-  def getLabelList(self):
-    return [ stat.full_label for stat in self._stat_list ]
+  @property
+  def label_list(self):
+    if self._label_list:
+      return self._label_list
 
-  def iterationFinished(self):
-    self._all_result_list.append(self.result_list)
-    if self._first_iteration:
-      self.label_list = self.getLabelList()
+    # TODO: Should perhaps be cached...
+    label_list = []
+    for suite_dict in self._all_suite_list:
+      if suite_dict['expected'] == -1:
+        return None
 
-    self.logger.debug("RESULTS: %s" % self.result_list)
-    self.result_list = []
-    self._first_iteration = False
-    self._suite_idx = 0
-    self._result_idx = 0
+      label_list.extend([ stat.full_label for stat in suite_dict['stat_list'] ])
+
+    self._label_list = label_list
+    return label_list
 
   def getCurrentSuiteStatList(self):
-    start_index = self._suite_idx and \
-        self._result_idx_checkpoint_list[self._suite_idx - 1] or 0
-
-    return self._stat_list[start_index:self._result_idx]
+    return self._current_suite_dict['stat_list']
 
-  def exitSuite(self):
-    if self._first_iteration:
-      self._result_idx_checkpoint_list.append(self._result_idx)
-    else:
-      expected_result_idx = self._result_idx_checkpoint_list[self._suite_idx]
-      while self._result_idx != expected_result_idx:
-        self.result_list.append(0)
-        self._stat_list[self._result_idx].add_error()
-        self._result_idx += 1
+  @staticmethod
+  def _addResultWithError(result_list, expected_len):
+    missing_result_n = expected_len - len(result_list)
+    if missing_result_n > 0:
+      result_list.extend(missing_result_n * [-1])
 
-    self._suite_idx += 1
+  def exitSuite(self, with_error=False):
+    if with_error:
+      if self._current_suite_dict['expected'] != -1:
+        self._addResultWithError(self._current_result_list,
+                                 self._current_suite_dict['expected'])
+    else:
+      if self._current_suite_dict['expected'] == -1:
+        self._current_suite_dict['expected'] = len(self._current_result_list)
+
+        # Fix previous results
+        for result_list in self._current_suite_dict['all_result_list']:
+          self._addResultWithError(result_list,
+                                   self._current_suite_dict['expected'])
+
+    self._current_suite_dict['all_result_list'].append(self._current_result_list)
+    self._current_suite_index += 1
+
+  def iterationFinished(self):
+    self._current_suite_index = 0
 
   @abc.abstractmethod
   def flush(self, partial=True):
-    self._all_result_list = []
+    # TODO: Should perhaps be cached...
+    all_result_list = []
+    for result_dict in self._all_suite_list:
+      if result_dict['expected'] == -1:
+        raise NothingFlushedException()
+
+      for index, result_list in enumerate(result_dict['all_result_list']):
+        try:
+          all_result_list[index].extend(result_list)
+        except IndexError:
+          all_result_list.append(result_list)
+
+      result_dict['all_result_list'] = []
+
+    return all_result_list
 
   @abc.abstractmethod
   def __exit__(self, exc_type, exc_value, traceback_object):
     signal.signal(signal.SIGTERM, signal.SIG_IGN)
-    self.flush(partial=False)
+    try:
+      self.flush(partial=False)
+    except NothingFlushedException:
+      pass
     return True
 
 class CSVBenchmarkResult(BenchmarkResult):
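The heart of the change is exitSuite(): the first successful run of a suite fixes its 'expected' result count and retroactively pads every earlier, shorter row, while later failed runs are padded immediately. A compressed, runnable walk-through of that bookkeeping (exit_suite() is an illustrative stand-in for the method above):

suite = {'name': 'example', 'all_result_list': [], 'stat_list': [], 'expected': -1}

def exit_suite(current_result_list, with_error):
  if with_error:
    if suite['expected'] != -1:
      # Failed run after a success: pad this row immediately.
      current_result_list.extend((suite['expected'] - len(current_result_list)) * [-1])
  elif suite['expected'] == -1:
    # First successful run: record the expected length, then fix every
    # earlier (failed, shorter) row, as _addResultWithError() does above.
    suite['expected'] = len(current_result_list)
    for result_list in suite['all_result_list']:
      result_list.extend((suite['expected'] - len(result_list)) * [-1])
  suite['all_result_list'].append(current_result_list)

exit_suite([1.2], with_error=True)             # first iteration fails early
exit_suite([1.0, 2.0, 3.0], with_error=False)  # first success: expected = 3
print(suite['all_result_list'])  # [[1.2, -1, -1], [1.0, 2.0, 3.0]]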
@@ -207,15 +248,15 @@ class CSVBenchmarkResult(BenchmarkResult):
     return self
 
   def flush(self, partial=True):
+    result_list = super(CSVBenchmarkResult, self).flush(partial)
+
     if self._result_file.tell() == 0:
       self._csv_writer.writerow(self.label_list)
 
-    self._csv_writer.writerows(self._all_result_list)
+    self._csv_writer.writerows(result_list)
     self._result_file.flush()
     os.fsync(self._result_file.fileno())
 
-    super(CSVBenchmarkResult, self).flush(partial)
-
   def __exit__(self, exc_type, exc_value, traceback_object):
     super(CSVBenchmarkResult, self).__exit__(exc_type, exc_value,
                                              traceback_object)
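CSVBenchmarkResult.flush() now writes the rows returned by the base class, which concatenates each iteration's per-suite result lists into one flat row per iteration and clears the buffered lists. A standalone sketch of that merge under the same data layout (merge_rows() mirrors the loop in the abstract flush() above):

class NothingFlushedException(Exception):
  pass

def merge_rows(all_suite_list):
  all_result_list = []
  for suite_dict in all_suite_list:
    if suite_dict['expected'] == -1:
      raise NothingFlushedException()
    for index, result_list in enumerate(suite_dict['all_result_list']):
      try:
        # The row for this iteration was already started by a previous suite.
        all_result_list[index].extend(result_list)
      except IndexError:
        all_result_list.append(result_list)
    suite_dict['all_result_list'] = []
  return all_result_list

suite_list = [{'expected': 2, 'all_result_list': [[1, 2], [3, 4]]},
              {'expected': 1, 'all_result_list': [[5], [6]]}]
print(merge_rows(suite_list))  # [[1, 2, 5], [3, 4, 6]]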
@@ -251,6 +292,8 @@ class ERP5BenchmarkResult(BenchmarkResult):
     self.log_file.seek(0)
 
   def flush(self, partial=True):
+    result_list = super(ERP5BenchmarkResult, self).flush()
+
     benchmark_result = xmlrpclib.ServerProxy(
       self._argument_namespace.erp5_publish_url,
       verbose=True,
@@ -261,12 +304,10 @@ class ERP5BenchmarkResult(BenchmarkResult):
       self._argument_namespace.repeat,
       self._nb_users,
       self._argument_namespace.benchmark_suite_name_list,
-      self.getLabelList(),
-      self._all_result_list,
+      self.label_list,
+      result_list,
       self._log_buffer_list)
 
-    super(ERP5BenchmarkResult, self).flush()
-
   def __exit__(self, exc_type, exc_value, traceback_object):
     super(ERP5BenchmarkResult, self).__exit__(exc_type, exc_value,
                                               traceback_object)
...
@@ -28,14 +28,18 @@
 #
 ##############################################################################
 
-from .result import CSVBenchmarkResult
+from .result import CSVBenchmarkResult, NothingFlushedException
 
 class CSVScalabilityBenchmarkResult(CSVBenchmarkResult):
   def flush(self, partial=True):
-    super(CSVScalabilityBenchmarkResult, self).flush(partial)
-    self._argument_namespace.notify_method(self._result_filename,
-                                           self._result_file.tell(),
-                                           partial=partial)
+    try:
+      super(CSVScalabilityBenchmarkResult, self).flush(partial)
+    except NothingFlushedException:
+      pass
+    else:
+      self._argument_namespace.notify_method(self._result_filename,
+                                             self._result_file.tell(),
+                                             partial=partial)
 
 from .performance_tester import PerformanceTester
...
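Finally, the scalability subclass only notifies when something was actually written: the else branch runs solely when the inherited flush() did not raise. The try/except/else pattern in isolation, with illustrative stand-ins:

class NothingFlushedException(Exception):
  pass

def flush(ready):
  if not ready:
    raise NothingFlushedException()
  return [[1.0, 2.0]]

for ready in (False, True):
  try:
    rows = flush(ready)
  except NothingFlushedException:
    pass  # nothing flushed yet: no notification
  else:
    print("notify: %d row(s) written" % len(rows))  # runs only on success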