From c4b9e628b2b56ed117d1fe8c494e65e93ffbcf27 Mon Sep 17 00:00:00 2001
From: Arnaud Fontaine <arnaud.fontaine@nexedi.com>
Date: Tue, 30 Aug 2011 08:11:09 +0000
Subject: [PATCH] Reorganize the code and fix relative imports

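The monolithic benchmark.py is split into argument.py (the argparse
type helpers), process.py (BenchmarkProcess) and result.py (the result
and statistics classes); consumers now use absolute imports instead of
relative ones. A minimal sketch of the new layout, assuming the
erp5.util package is importable (the argument values below are
illustrative only):

    from erp5.util.benchmark.argument import ArgumentType
    from erp5.util.benchmark.process import BenchmarkProcess
    from erp5.util.benchmark.result import CSVBenchmarkResult

    # The relocated argparse type helpers validate CLI input as before:
    repeat = ArgumentType.strictlyPositiveIntType('100')         # -> 100
    users = ArgumentType.strictlyPositiveIntOrRangeType('1,10')  # -> (1, 10)
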
git-svn-id: https://svn.erp5.org/repos/public/erp5/trunk/utils@46012 20353a03-c40f-0410-a6d1-a30d3c3de9de
---
 erp5/util/benchmark/argument.py               | 118 ++++++++++
 erp5/util/benchmark/performance_tester.py     |   6 +-
 erp5/util/benchmark/process.py                | 142 ++++++++++++
 erp5/util/benchmark/report.py                 |   2 +-
 .../benchmark/{benchmark.py => result.py}     | 208 +-----------------
 erp5/util/benchmark/scalability_tester.py     |   2 +-
 6 files changed, 268 insertions(+), 210 deletions(-)
 create mode 100644 erp5/util/benchmark/argument.py
 create mode 100644 erp5/util/benchmark/process.py
 rename erp5/util/benchmark/{benchmark.py => result.py} (60%)
 mode change 100644 => 100755 erp5/util/benchmark/scalability_tester.py

diff --git a/erp5/util/benchmark/argument.py b/erp5/util/benchmark/argument.py
new file mode 100644
index 0000000000..d91440fc22
--- /dev/null
+++ b/erp5/util/benchmark/argument.py
@@ -0,0 +1,118 @@
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+#                    Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+
+import os
+import argparse
+import functools
+
+class ArgumentType(object):
+  @classmethod
+  def directoryType(cls, path):
+    if not (os.path.isdir(path) and os.access(path, os.W_OK)):
+      raise argparse.ArgumentTypeError("'%s' is not a valid directory or is "\
+                                         "not writable" % path)
+
+    return path
+
+  @classmethod
+  def objectFromModule(cls, module_name, object_name=None,
+                       callable_object=False):
+    if module_name.endswith('.py'):
+      module_name = module_name[:-3]
+
+    if not object_name:
+      object_name = module_name
+
+    import sys
+    sys.path.append(os.getcwd())
+
+    try:
+      module = __import__(module_name, globals(), locals(), [object_name], -1)
+    except Exception, e:
+      raise argparse.ArgumentTypeError("Cannot import '%s.%s': %s" % \
+                                         (module_name, object_name, str(e)))
+
+    try:
+      obj = getattr(module, object_name)
+    except AttributeError:
+      raise argparse.ArgumentTypeError("Could not get '%s' in '%s'" % \
+                                         (object_name, module_name))
+
+    if callable_object and not callable(obj):
+      raise argparse.ArgumentTypeError(
+        "'%s.%s' is not callable" % (module_name, object_name))
+
+    return obj
+
+  @classmethod
+  def strictlyPositiveIntType(cls, value):
+    try:
+      converted_value = int(value)
+    except ValueError:
+      pass
+    else:
+      if converted_value > 0:
+        return converted_value
+
+    raise argparse.ArgumentTypeError('expects a strictly positive integer')
+
+  @classmethod
+  def strictlyPositiveIntOrRangeType(cls, value):
+    try:
+      return cls.strictlyPositiveIntType(value)
+    except argparse.ArgumentTypeError:
+      try:
+        min_max_list = value.split(',')
+      except ValueError:
+        pass
+      else:
+        if len(min_max_list) == 2:
+          minimum, maximum = cls.strictlyPositiveIntType(min_max_list[0]), \
+              cls.strictlyPositiveIntType(min_max_list[1])
+
+          if minimum >= maximum:
+            raise argparse.ArgumentTypeError('%d >= %d' % (minimum, maximum))
+
+          return (minimum, maximum)
+
+    raise argparse.ArgumentTypeError(
+      'expects either a strictly positive integer or a range of strictly '
+      'positive integers separated by a comma')
+
+  @classmethod
+  def ERP5UrlType(cls, url):
+    if url[-1] == '/':
+      url_list = url.rsplit('/', 2)[:-1]
+    else:
+      url_list = url.rsplit('/', 1)
+
+    url_list[0] = url_list[0] + '/'
+    if len(url_list) != 2:
+      raise argparse.ArgumentTypeError("Invalid URL given")
+
+    return url_list
diff --git a/erp5/util/benchmark/performance_tester.py b/erp5/util/benchmark/performance_tester.py
index 82ebeafe3c..78dad99eb4 100755
--- a/erp5/util/benchmark/performance_tester.py
+++ b/erp5/util/benchmark/performance_tester.py
@@ -34,7 +34,9 @@ import sys
 import multiprocessing
 import xmlrpclib
 
-from benchmark import ArgumentType, BenchmarkProcess, ERP5BenchmarkResult
+from erp5.util.benchmark.argument import ArgumentType
+from erp5.util.benchmark.process import BenchmarkProcess
+from erp5.util.benchmark.result import ERP5BenchmarkResult, CSVBenchmarkResult
 
 class PerformanceTester(object):
   def __init__(self, namespace=None):
@@ -166,10 +168,8 @@ class PerformanceTester(object):
 
   def getResultClass(self):
     if self._argument_namespace.erp5_publish_url:
-      from benchmark import ERP5BenchmarkResult
       return ERP5BenchmarkResult
     else:
-      from benchmark import CSVBenchmarkResult
       return CSVBenchmarkResult
 
   def preRun(self):
diff --git a/erp5/util/benchmark/process.py b/erp5/util/benchmark/process.py
new file mode 100644
index 0000000000..3617619222
--- /dev/null
+++ b/erp5/util/benchmark/process.py
@@ -0,0 +1,142 @@
+##############################################################################
+#
+# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
+#                    Arnaud Fontaine <arnaud.fontaine@nexedi.com>
+#
+# WARNING: This program as such is intended to be used by professional
+# programmers who take the whole responsibility of assessing all potential
+# consequences resulting from its eventual inadequacies and bugs.
+# End users who are looking for a ready-to-use solution with commercial
+# guarantees and support are strongly advised to contract a Free Software
+# Service Company.
+#
+# This program is Free Software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+#
+##############################################################################
+
+import multiprocessing
+import csv
+import traceback
+import os
+import logging
+import signal
+import sys
+
+from erp5.util.test_browser.browser import Browser
+
+class BenchmarkProcess(multiprocessing.Process):
+  def __init__(self, exit_msg_queue, result_klass, argument_namespace,
+               nb_users, user_index, *args, **kwargs):
+    self._exit_msg_queue = exit_msg_queue
+    self._result_klass = result_klass
+    self._argument_namespace = argument_namespace
+    self._nb_users = nb_users
+    self._user_index = user_index
+
+    # Initialized when running the test
+    self._browser = None
+    self._current_repeat = 1
+
+    super(BenchmarkProcess, self).__init__(*args, **kwargs)
+
+  def stopGracefully(self, *args, **kwargs):
+    raise StopIteration, "Interrupted by user"
+
+  def getBrowser(self, log_file):
+    info_list = tuple(self._argument_namespace.url) + \
+        tuple(self._argument_namespace.user_tuple[self._user_index])
+
+    return Browser(*info_list,
+                   is_debug=self._argument_namespace.enable_debug,
+                   log_file=log_file,
+                   is_legacy_listbox=self._argument_namespace.is_legacy_listbox)
+
+  def runBenchmarkSuiteList(self, result):
+    for target in self._argument_namespace.benchmark_suite_list:
+      self._logger.debug("EXECUTE: %s" % target)
+      result.enterSuite(target.__name__)
+
+      try:
+        target(result, self._browser)
+      except:
+        msg = "%s: %s" % (target, traceback.format_exc())
+        if self._argument_namespace.enable_debug:
+          try:
+            msg += self._browser.normalized_contents
+          except:
+            pass
+
+        if self._current_repeat == 1:
+          self._logger.error(msg)
+          raise
+
+        self._logger.warning(msg)
+
+      for stat in result.getCurrentSuiteStatList():
+        mean = stat.mean
+
+        self._logger.info("%s: min=%.3f, mean=%.3f (+/- %.3f), max=%.3f" % \
+                            (stat.full_label,
+                             stat.minimum,
+                             mean,
+                             stat.standard_deviation,
+                             stat.maximum))
+
+        if self._argument_namespace.max_global_average and \
+           mean > self._argument_namespace.max_global_average:
+          self._logger.info("Stopping as mean is greater than maximum "
+                            "global average")
+
+          raise StopIteration
+
+      result.exitSuite()
+
+    result.iterationFinished()
+
+  def run(self):
+    result_instance = self._result_klass(self._argument_namespace,
+                                         self._nb_users,
+                                         self._user_index)
+
+    self._logger = result_instance.getLogger()
+
+    if self._argument_namespace.repeat != -1:
+      signal.signal(signal.SIGTERM, self.stopGracefully)
+
+    exit_status = 0
+    exit_msg = None
+
+    try:
+      with result_instance as result:
+        self._browser = self.getBrowser(result_instance.log_file)
+
+        while self._current_repeat != (self._argument_namespace.repeat + 1):
+          self._logger.info("Iteration: %d" % self._current_repeat)
+          self.runBenchmarkSuiteList(result)
+          self._current_repeat += 1
+
+          if self._current_repeat == 100:
+            result.flush()
+
+    except StopIteration, e:
+      exit_msg = str(e)
+      exit_status = 1
+
+    except BaseException, e:
+      exit_msg = str(e)
+      exit_status = 2
+
+    self._exit_msg_queue.put(exit_msg)
+    sys.exit(exit_status)
diff --git a/erp5/util/benchmark/report.py b/erp5/util/benchmark/report.py
index 2e3071e4d9..fc1e27fad7 100755
--- a/erp5/util/benchmark/report.py
+++ b/erp5/util/benchmark/report.py
@@ -63,7 +63,7 @@ def parseArguments():
 
 import csv
 
-from benchmark import BenchmarkResultStatistic
+from erp5.util.benchmark.result import BenchmarkResultStatistic
 
 def computeStatisticFromFilenameList(argument_namespace, filename_list):
   reader_list = []
diff --git a/erp5/util/benchmark/benchmark.py b/erp5/util/benchmark/result.py
similarity index 60%
rename from erp5/util/benchmark/benchmark.py
rename to erp5/util/benchmark/result.py
index 905e2db929..8a143956b7 100644
--- a/erp5/util/benchmark/benchmark.py
+++ b/erp5/util/benchmark/result.py
@@ -26,98 +26,11 @@
 #
 ##############################################################################
 
-import argparse
-import functools
-
-class ArgumentType(object):
-  @classmethod
-  def directoryType(cls, path):
-    if not (os.path.isdir(path) and os.access(path, os.W_OK)):
-      raise argparse.ArgumentTypeError("'%s' is not a valid directory or is "\
-                                         "not writable" % path)
-
-    return path
-
-  @classmethod
-  def objectFromModule(cls, module_name, object_name=None,
-                       callable_object=False):
-    if module_name.endswith('.py'):
-      module_name = module_name[:-3]
-
-    if not object_name:
-      object_name = module_name
-
-    import sys
-    sys.path.append(os.getcwd())
-
-    try:
-      module = __import__(module_name, globals(), locals(), [object_name], -1)
-    except Exception, e:
-      raise argparse.ArgumentTypeError("Cannot import '%s.%s': %s" % \
-                                         (module_name, object_name, str(e)))
-
-    try:
-      obj = getattr(module, object_name)
-    except AttributeError:
-      raise argparse.ArgumentTypeError("Could not get '%s' in '%s'" % \
-                                         (object_name, module_name))
-
-    if callable_object and not callable(obj):
-      raise argparse.ArgumentTypeError(
-        "'%s.%s' is not callable" % (module_name, object_name))
-
-    return obj
-
-  @classmethod
-  def strictlyPositiveIntType(cls, value):
-    try:
-      converted_value = int(value)
-    except ValueError:
-      pass
-    else:
-      if converted_value > 0:
-        return converted_value
-
-    raise argparse.ArgumentTypeError('expects a strictly positive integer')
-
-  @classmethod
-  def strictlyPositiveIntOrRangeType(cls, value):
-    try:
-      return cls.strictlyPositiveIntType(value)
-    except argparse.ArgumentTypeError:
-      try:
-        min_max_list = value.split(',')
-      except ValueError:
-        pass
-      else:
-        if len(min_max_list) == 2:
-          minimum, maximum = cls.strictlyPositiveIntType(min_max_list[0]), \
-              cls.strictlyPositiveIntType(min_max_list[1])
-
-          if minimum >= maximum:
-            raise argparse.ArgumentTypeError('%d >= %d' % (minimum, maximum))
-
-          return (minimum, maximum)
-
-    raise argparse.ArgumentTypeError(
-      'expects either a strictly positive integer or a range of strictly '
-      'positive integer separated by a comma')
-
-  @classmethod
-  def ERP5UrlType(cls, url):
-    if url[-1] == '/':
-      url_list = url.rsplit('/', 2)[:-1]
-    else:
-      url_list = url.rsplit('/', 1)
-
-    url_list[0] = url_list[0] + '/'
-    if len(url_list) != 2:
-      raise argparse.ArgumentTypeError("Invalid URL given")
-
-    return url_list
-
 import sys
 import math
+import os
+import csv
+import logging
 
 class BenchmarkResultStatistic(object):
   def __init__(self, suite, label):
@@ -375,118 +288,3 @@ class ERP5BenchmarkResult(BenchmarkResult):
 
     result.BenchmarkResult_completed(error_message_set and 'FAIL' or 'PASS',
                                      error_message_set)
-
-import multiprocessing
-import csv
-import traceback
-import os
-import logging
-import signal
-import sys
-
-from erp5.utils.test_browser.browser import Browser
-
-class BenchmarkProcess(multiprocessing.Process):
-  def __init__(self, exit_msg_queue, result_klass, argument_namespace,
-               nb_users, user_index, *args, **kwargs):
-    self._exit_msg_queue = exit_msg_queue
-    self._result_klass = result_klass
-    self._argument_namespace = argument_namespace
-    self._nb_users = nb_users
-    self._user_index = user_index
-
-    # Initialized when running the test
-    self._browser = None
-    self._current_repeat = 1
-
-    super(BenchmarkProcess, self).__init__(*args, **kwargs)
-
-  def stopGracefully(self, *args, **kwargs):
-    raise StopIteration, "Interrupted by user"
-
-  def getBrowser(self, log_file):
-    info_list = tuple(self._argument_namespace.url) + \
-        tuple(self._argument_namespace.user_tuple[self._user_index])
-
-    return Browser(*info_list,
-                   is_debug=self._argument_namespace.enable_debug,
-                   log_file=log_file,
-                   is_legacy_listbox=self._argument_namespace.is_legacy_listbox)
-
-  def runBenchmarkSuiteList(self, result):
-    for target_idx, target in enumerate(self._argument_namespace.benchmark_suite_list):
-      self._logger.debug("EXECUTE: %s" % target)
-      result.enterSuite(target.__name__)
-
-      try:
-        target(result, self._browser)
-      except:
-        msg = "%s: %s" % (target, traceback.format_exc())
-        if self._argument_namespace.enable_debug:
-          try:
-            msg += self._browser.normalized_contents
-          except:
-            pass
-
-        if self._current_repeat == 1:
-          self._logger.error(msg)
-          raise
-
-        self._logger.warning(msg)
-
-      for stat in result.getCurrentSuiteStatList():
-        mean = stat.mean
-
-        self._logger.info("%s: min=%.3f, mean=%.3f (+/- %.3f), max=%.3f" % \
-                            (stat.full_label,
-                             stat.minimum,
-                             mean,
-                             stat.standard_deviation,
-                             stat.maximum))
-
-        if self._argument_namespace.max_global_average and \
-           mean > self._argument_namespace.max_global_average:
-          self._logger.info("Stopping as mean is greater than maximum "
-                            "global average")
-
-          raise StopIteration
-
-      result.exitSuite()
-
-    result.iterationFinished()
-
-  def run(self):
-    result_instance = self._result_klass(self._argument_namespace,
-                                         self._nb_users,
-                                         self._user_index)
-
-    self._logger = result_instance.getLogger()
-
-    if self._argument_namespace.repeat != -1:
-      signal.signal(signal.SIGTERM, self.stopGracefully)
-
-    exit_status = 0
-    exit_msg = None
-
-    try:
-      with result_instance as result:
-        self._browser = self.getBrowser(result_instance.log_file)
-
-        while self._current_repeat != (self._argument_namespace.repeat + 1):
-          self._logger.info("Iteration: %d" % self._current_repeat)
-          self.runBenchmarkSuiteList(result)
-          self._current_repeat += 1
-
-          if self._current_repeat == 100:
-            result.flush()
-
-    except StopIteration, e:
-      exit_msg = str(e)
-      exit_status = 1
-
-    except BaseException, e:
-      exit_msg = e
-      exit_status = 2
-
-    self._exit_msg_queue.put(exit_msg)
-    sys.exit(exit_status)
diff --git a/erp5/util/benchmark/scalability_tester.py b/erp5/util/benchmark/scalability_tester.py
old mode 100644
new mode 100755
index 6ffc6eddb2..749a4ab50d
--- a/erp5/util/benchmark/scalability_tester.py
+++ b/erp5/util/benchmark/scalability_tester.py
@@ -28,7 +28,7 @@
 #
 ##############################################################################
 
-from benchmark import CSVBenchmarkResult
+from erp5.util.benchmark.result import CSVBenchmarkResult
 
 class CSVScalabilityBenchmarkResult(CSVBenchmarkResult):
   def flush(self, partial=True):
-- 
2.30.9