Commit de75a33d, authored Sep 01, 2011 by Julien Muchembled
Parents: 65d6f5fc, 91247aec

Import erp5.utils.{benchmark,test_browser} from svn.erp5.org:public/erp5/trunk/utils

Showing 15 changed files with 2434 additions and 1 deletion (+2434, -1):
  CHANGES.erp5.util.txt                               +6    -1
  erp5/util/README.test_browser.txt                   +6    -0
  erp5/util/benchmark/__init__.py                     +0    -0
  erp5/util/benchmark/argument.py                     +118  -0
  erp5/util/benchmark/examples/createPerson.py        +63   -0
  erp5/util/benchmark/examples/userInfo.py            +2    -0
  erp5/util/benchmark/performance_tester.py           +283  -0
  erp5/util/benchmark/process.py                      +156  -0
  erp5/util/benchmark/report.py                       +278  -0
  erp5/util/benchmark/result.py                       +288  -0
  erp5/util/benchmark/scalability_tester.py           +90   -0
  erp5/util/test_browser/__init__.py                  +0    -0
  erp5/util/test_browser/browser.py                   +1046 -0
  erp5/util/test_browser/examples/testAddPerson.py    +91   -0
  setup.py                                            +7    -0
CHANGES.erp5.util.txt (modified)

@@ -4,7 +4,12 @@ Changes
 0.2 (unreleased)
 ----------------

-* No changes yet.
+* Imported from https://svn.erp5.org/repos/public/erp5/trunk/utils/
+
+  - erp5.util.test_browser:
+    Programmable browser for functional and performance tests for ERP5
+  - erp5.util.benchmark:
+    Performance benchmarks for ERP5 with erp5.utils.test_browser

 0.1 (2011-08-08)
 ----------------
erp5/util/README.test_browser.txt (new file, mode 100644)

API Documentation
-----------------

You can generate the API documentation using ``epydoc'':

  $ epydoc src/erp5
erp5/util/benchmark/__init__.py (new file, mode 100644, empty)
erp5/util/benchmark/argument.py (new file, mode 100644)

##############################################################################
#
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################

import os
import argparse
import functools

class ArgumentType(object):
  @classmethod
  def directoryType(cls, path):
    if not (os.path.isdir(path) and os.access(path, os.W_OK)):
      raise argparse.ArgumentTypeError("'%s' is not a valid directory or is "
                                       "not writable" % path)

    return path

  @classmethod
  def objectFromModule(cls, module_name, object_name=None,
                       callable_object=False):
    if module_name.endswith('.py'):
      module_name = module_name[:-3]

    if not object_name:
      object_name = module_name

    import sys
    sys.path.append(os.getcwd())

    try:
      module = __import__(module_name, globals(), locals(), [object_name], -1)
    except Exception, e:
      raise argparse.ArgumentTypeError("Cannot import '%s.%s': %s" % \
                                         (module_name, object_name, str(e)))

    try:
      obj = getattr(module, object_name)
    except AttributeError:
      raise argparse.ArgumentTypeError("Could not get '%s' in '%s'" % \
                                         (object_name, module_name))

    if callable_object and not callable(obj):
      raise argparse.ArgumentTypeError("'%s.%s' is not callable" %
                                       (module_name, object_name))

    return obj

  @classmethod
  def strictlyPositiveIntType(cls, value):
    try:
      converted_value = int(value)
    except ValueError:
      pass
    else:
      if converted_value > 0:
        return converted_value

    raise argparse.ArgumentTypeError('expects a strictly positive integer')

  @classmethod
  def strictlyPositiveIntOrRangeType(cls, value):
    try:
      return cls.strictlyPositiveIntType(value)
    except argparse.ArgumentTypeError:
      try:
        min_max_list = value.split(',')
      except ValueError:
        pass
      else:
        if len(min_max_list) == 2:
          minimum, maximum = cls.strictlyPositiveIntType(min_max_list[0]), \
              cls.strictlyPositiveIntType(min_max_list[1])

          if minimum >= maximum:
            raise argparse.ArgumentTypeError('%d >= %d' % (minimum, maximum))

          return (minimum, maximum)

    raise argparse.ArgumentTypeError(
      'expects either a strictly positive integer or a range of strictly '
      'positive integers separated by a comma')

  @classmethod
  def ERP5UrlType(cls, url):
    if url[-1] == '/':
      url_list = url.rsplit('/', 2)[:-1]
    else:
      url_list = url.rsplit('/', 1)

    url_list[0] = url_list[0] + '/'

    if len(url_list) != 2:
      raise argparse.ArgumentTypeError("Invalid URL given")

    return url_list
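
ArgumentType is a bag of argparse type= callables: each one either normalizes its input (an ERP5 URL into a [base URL, site id] pair, a 'min,max' string into a tuple) or rejects it with an ArgumentTypeError. A minimal sketch of how they plug into a parser; the real wiring lives in the collapsed performance_tester.py, and the option layout below is purely illustrative:

  import argparse

  from erp5.util.benchmark.argument import ArgumentType

  parser = argparse.ArgumentParser()
  # ERP5UrlType splits the URL into a [base_url, site_id] pair
  parser.add_argument('url', type=ArgumentType.ERP5UrlType)
  # Accepts either '4' (kept as an int) or '1,4' (converted to (1, 4))
  parser.add_argument('users', type=ArgumentType.strictlyPositiveIntOrRangeType)

  namespace = parser.parse_args(['http://foo.bar:4242/erp5/', '1,4'])
  # namespace.url is now a [base_url, site_id] pair, namespace.users is (1, 4)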
erp5/util/benchmark/examples/createPerson.py (new file, mode 100644)

# -*- coding: utf-8 -*-

def createPerson(result, browser):
  """
  Create a Person and add a telephone number. It can be run indefinitely
  (e.g. until it is interrupted by the end user) with 1 concurrent user,
  through performance_tester_erp5 with the following command:

    performance_tester_erp5 http://foo.bar:4242/erp5/ 1 createPerson

  Please note that you must run this command from the same directory as this
  script and userInfo.py. Further information about performance_tester_erp5
  options and arguments is available by specifying ``--help''.
  """
  # Go to Persons module (person_module)
  result('Go to person module',
         browser.mainForm.submitSelectModule(value='/person_module'))

  # Create a new person and record the time elapsed in seconds
  result('Add Person', browser.mainForm.submitNew())

  # Check whether it has been successfully created
  assert browser.getTransitionMessage() == 'Object created.'

  # Fill the first and last name of the newly created person
  browser.mainForm.getControl(name='field_my_first_name').value = 'Create'
  browser.mainForm.getControl(name='field_my_last_name').value = 'Person'

  # Submit the changes, record the time elapsed in seconds
  result('Save', browser.mainForm.submitSave())

  # Check whether the changes have been successfully updated
  assert browser.getTransitionMessage() == 'Data updated.'

  person_url = browser.url

  # Add phone number
  result('Add telephone',
         browser.mainForm.submitSelectAction(value='add Telephone'))

  # Fill telephone title and number
  browser.mainForm.getControl(name='field_my_title').value = 'Personal'
  browser.mainForm.getControl(name='field_my_telephone_number').value = \
      '0123456789'

  # Submit the changes, record the time elapsed in seconds
  result('Save', browser.mainForm.submitSave())

  # Check whether the changes have been successfully updated
  assert browser.getTransitionMessage() == 'Data updated.'

  # Go back to the Person page before validating
  browser.open(person_url)

  # Validate it (as the workflow action may not be available yet, try 5 times
  # and sleep 5s between each attempt before failing)
  show_validate_time, waiting_for_validate_action = \
      browser.mainForm.submitSelectWorkflow(value='validate_action',
                                            maximum_attempt_number=5,
                                            sleep_between_attempt=5)

  result('Waiting for validate_action', waiting_for_validate_action)
  result('Show validate', show_validate_time)
  result('Validated', browser.mainForm.submitDialogConfirm())

  assert browser.getTransitionMessage() == 'Status changed.'
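
As process.py (below) shows, a benchmark suite is simply a module-level callable taking (result, browser): the runner calls result.enterSuite() with the function's name, opens the browser, then invokes the function. A minimal, hypothetical suite therefore reduces to a single timed step:

  # Minimal sketch of the suite contract; every result(label, elapsed_seconds)
  # call feeds one sample into the per-label statistics kept by BenchmarkResult.
  def goToPersonModule(result, browser):
    result('Go to person module',
           browser.mainForm.submitSelectModule(value='/person_module'))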
erp5/util/benchmark/examples/userInfo.py (new file, mode 100644)

# Specify user login/password used to run the tests
user_tuple = (('zope', 'zope'),)
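
BenchmarkProcess (in process.py below) looks the credentials up as user_tuple[user_index], so the tuple needs one (login, password) pair per concurrent user. A hypothetical two-user configuration would read:

  # One entry per concurrent user; the logins here are illustrative.
  user_tuple = (('user1', 'secret1'),
                ('user2', 'secret2'))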
erp5/util/benchmark/performance_tester.py (new file, mode 100755)

[283-line diff collapsed in the original view]
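
Although its body is collapsed here, performance_tester.py provides the performance_tester_erp5 console script declared in setup.py at the end of this commit; the createPerson.py docstring above shows its invocation, e.g. `performance_tester_erp5 http://foo.bar:4242/erp5/ 1 createPerson`, run from the directory holding the suite script and userInfo.py.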
erp5/util/benchmark/process.py (new file, mode 100644)

##############################################################################
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
# [GPL v2 or later boilerplate identical to argument.py above]
##############################################################################

import multiprocessing
import csv
import traceback
import os
import logging
import signal
import sys

from ..test_browser.browser import Browser

MAXIMUM_ERROR_COUNTER = 10

RESULT_NUMBER_BEFORE_FLUSHING = 100

class BenchmarkProcess(multiprocessing.Process):
  def __init__(self, exit_msg_queue, result_klass, argument_namespace,
               nb_users, user_index, *args, **kwargs):
    self._exit_msg_queue = exit_msg_queue
    self._result_klass = result_klass
    self._argument_namespace = argument_namespace
    self._nb_users = nb_users
    self._user_index = user_index

    # Initialized when running the test
    self._browser = None
    self._current_repeat = 1

    # TODO: Per target error counter instead of global one?
    self._error_counter = 0

    super(BenchmarkProcess, self).__init__(*args, **kwargs)

  def stopGracefully(self, *args, **kwargs):
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    raise StopIteration("Interrupted by user or because of an error from "
                        "another process, flushing remaining results...")

  def getBrowser(self, log_file):
    info_list = tuple(self._argument_namespace.url) + \
        tuple(self._argument_namespace.user_tuple[self._user_index])

    return Browser(*info_list,
                   is_debug=self._argument_namespace.enable_debug,
                   log_file=log_file,
                   is_legacy_listbox=self._argument_namespace.is_legacy_listbox)

  def runBenchmarkSuiteList(self, result):
    for target_idx, target in enumerate(
        self._argument_namespace.benchmark_suite_list):
      self._logger.debug("EXECUTE: %s" % target)
      result.enterSuite(target.__name__)

      try:
        self._browser.open()
        target(result, self._browser)
      except StopIteration:
        raise
      except Exception, e:
        msg = "%s: %s" % (target, traceback.format_exc())
        try:
          msg += "Last response headers:\n%s\nLast response contents:\n%s" % \
              (self._browser.headers, self._browser.contents)
        except:
          pass

        if (self._current_repeat == 1 or
            self._error_counter == MAXIMUM_ERROR_COUNTER):
          raise RuntimeError(msg)

        self._error_counter += 1
        self._logger.warning(msg)

      for stat in result.getCurrentSuiteStatList():
        mean = stat.mean
        self._logger.info("%s: min=%.3f, mean=%.3f (+/- %.3f), max=%.3f" % \
                            (stat.full_label, stat.minimum, mean,
                             stat.standard_deviation, stat.maximum))

        if (self._argument_namespace.max_global_average and
            mean > self._argument_namespace.max_global_average):
          raise RuntimeError("Stopping as mean is greater than maximum "
                             "global average")

      result.exitSuite()

    result.iterationFinished()

  def run(self):
    result_instance = self._result_klass(self._argument_namespace,
                                         self._nb_users,
                                         self._user_index)

    self._logger = result_instance.getLogger()

    # Ensure the data are flushed before exiting, handled by Result class
    # __exit__ block
    signal.signal(signal.SIGTERM, self.stopGracefully)
    # Ignore KeyboardInterrupt as it is handled by the parent process
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    exit_status = 0
    exit_msg = None
    try:
      with result_instance as result:
        self._browser = self.getBrowser(result_instance.log_file)

        while self._current_repeat != (self._argument_namespace.repeat + 1):
          self._logger.info("Iteration: %d" % self._current_repeat)
          self.runBenchmarkSuiteList(result)
          self._current_repeat += 1

          if not self._current_repeat % RESULT_NUMBER_BEFORE_FLUSHING:
            result.flush()

    except StopIteration, e:
      self._logger.error(e)
    except BaseException, e:
      exit_msg = str(e)
      exit_status = 1

    self._exit_msg_queue.put(exit_msg)
    sys.exit(exit_status)
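
The constructor arguments hint at how the (collapsed) performance_tester.py presumably drives these workers: one BenchmarkProcess per simulated user, plus a shared queue collecting each child's exit message, None on success and an error string otherwise (see the end of run() above). A rough, hypothetical driver:

  import multiprocessing

  from erp5.util.benchmark.process import BenchmarkProcess

  def runConcurrentUsers(result_klass, argument_namespace, nb_users):
    exit_msg_queue = multiprocessing.Queue()
    process_list = [BenchmarkProcess(exit_msg_queue, result_klass,
                                     argument_namespace, nb_users, user_index)
                    for user_index in range(nb_users)]

    for process in process_list:
      process.start()

    # run() always puts exactly one message per child before sys.exit()
    error_message_list = filter(None,
                                [exit_msg_queue.get() for _ in process_list])

    for process in process_list:
      process.join()

    return error_message_list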
erp5/util/benchmark/report.py (new file, mode 100755)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
#
# First version: ERP5Mechanize from Vincent Pelletier <vincent@nexedi.com>
#
# [GPL v2 or later boilerplate identical to argument.py above]
##############################################################################

import argparse

def parseArguments():
  parser = argparse.ArgumentParser(
    description='Generate reports for ERP5 benchmarking suites.')

  parser.add_argument('--enable-debug',
                      dest='is_debug',
                      action='store_true',
                      default=False,
                      help='Enable debug messages')

  parser.add_argument('--filename-prefix',
                      default='result',
                      metavar='PREFIX',
                      help='Filename prefix for results CSV files '
                           '(default: result)')

  parser.add_argument('--output-filename',
                      default='results.pdf',
                      metavar='FILENAME',
                      help='PDF output file (default: results.pdf)')

  parser.add_argument('report_directory',
                      help='Reports directory')

  namespace = parser.parse_args()

  return namespace

import csv

from .result import BenchmarkResultStatistic

def computeStatisticFromFilenameList(argument_namespace, filename_list):
  reader_list = []
  stat_list = []
  label_list = []

  for filename in filename_list:
    reader = csv.reader(open(filename, 'rb'), delimiter=',',
                        quoting=csv.QUOTE_MINIMAL)

    reader_list.append(reader)

    # Get headers
    row_list = reader.next()
    if not label_list:
      label_list = row_list
      for label in label_list:
        stat_list.append(BenchmarkResultStatistic(*label.split(': ', 1)))

    if row_list != label_list:
      raise AssertionError, "ERROR: Result labels: %s != %s" % \
          (label_list, row_list)

    for row_list in reader:
      for idx, row in enumerate(row_list):
        stat_list[idx].add(float(row))

  return stat_list

def formatFloatList(value_list):
  return [format(value, ".3f") for value in value_list]

import numpy
import pylab

from matplotlib import pyplot, ticker

def drawBarDiagram(pdf, title, stat_list):
  mean_list = []
  yerr_list = []
  minimum_list = []
  maximum_list = []
  label_list = []
  error_list = []

  for stat in stat_list:
    mean_list.append(stat.mean)
    yerr_list.append(stat.standard_deviation)
    minimum_list.append(stat.minimum)
    maximum_list.append(stat.maximum)
    label_list.append(stat.label)
    error_list.append(stat.error_sum)

  min_array = numpy.array(minimum_list)
  mean_array = numpy.array(mean_list)
  max_array = numpy.array(maximum_list)

  yerr_lower = numpy.minimum(mean_array - min_array, yerr_list)
  yerr_upper = numpy.minimum(max_array - mean_array, yerr_list)

  ## Draw diagrams
  # Create the figure
  figure = pyplot.figure(figsize=(11.69, 8.29))
  figure.subplots_adjust(bottom=0.13, right=0.98, top=0.95)
  pyplot.title(title)

  # Create the axes along with their labels
  axes = figure.add_subplot(111)
  axes.set_ylabel('Seconds')
  axes.set_xticks([])
  axes.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
  axes.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
  axes.yaxis.grid(True, 'major', linewidth=1.5)
  axes.yaxis.grid(True, 'minor')

  # Create the bars
  ind = numpy.arange(len(label_list))
  width = 0.33

  min_rects = axes.bar(ind, minimum_list, width, color='y', label='Minimum')
  avg_rects = axes.bar(ind + width, mean_list, width, color='r', label='Mean')

  axes.errorbar(numpy.arange(0.5, len(stat_list)),
                mean_list,
                yerr=[yerr_lower, yerr_upper],
                fmt=None,
                label='Standard deviation')

  max_rects = axes.bar(ind + width * 2, maximum_list, width, label='Maximum',
                       color='g')

  # Add the legend of bars
  axes.legend(loc=0)

  axes.table(rowLabels=['Minimum', 'Average', 'Std. deviation', 'Maximum',
                        'Errors'],
             colLabels=label_list,
             cellText=[formatFloatList(minimum_list),
                       formatFloatList(mean_list),
                       formatFloatList(yerr_list),
                       formatFloatList(maximum_list),
                       error_list],
             rowColours=('y', 'r', 'b', 'g', 'w'),
             loc='bottom',
             colLoc='center',
             rowLoc='center',
             cellLoc='center')

  pdf.savefig()
  pylab.close()

def drawConcurrentUsersPlot(pdf, title, nb_users_list, stat_list):
  figure = pyplot.figure(figsize=(11.69, 8.29), frameon=False)
  figure.subplots_adjust(bottom=0.1, right=0.98, left=0.07, top=0.95)
  pyplot.title(title)
  pyplot.grid(True, linewidth=1.5)

  axes = figure.add_subplot(111)

  min_array = numpy.array([stat.minimum for stat in stat_list])
  mean_array = numpy.array([stat.mean for stat in stat_list])
  max_array = numpy.array([stat.maximum for stat in stat_list])

  yerr_list = [stat.standard_deviation for stat in stat_list]
  yerr_lower = numpy.minimum(mean_array - min_array, yerr_list)
  yerr_upper = numpy.minimum(max_array - mean_array, yerr_list)

  axes.plot(nb_users_list, min_array, 'yo-', label='Minimum')

  axes.errorbar(nb_users_list,
                mean_array,
                yerr=[yerr_lower, yerr_upper],
                color='r',
                ecolor='b',
                label='Mean',
                elinewidth=2,
                fmt='D-',
                capsize=10.0)

  axes.plot(nb_users_list, max_array, 'gs-', label='Maximum')

  axes.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
  axes.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
  axes.yaxis.grid(True, 'minor')

  axes.xaxis.set_major_locator(ticker.FixedLocator(nb_users_list))
  axes.set_xticks(nb_users_list)
  axes.legend(loc=0)

  axes.set_xlabel('Concurrent users')
  axes.set_ylabel('Seconds')

  pyplot.xlim(xmin=nb_users_list[0])
  pdf.savefig()
  pylab.close()

from matplotlib.backends.backend_pdf import PdfPages

import glob
import os
import re

user_re = re.compile('-(\d+)users-')

def generateReport():
  argument_namespace = parseArguments()

  filename_iter = glob.iglob("%s-*repeat*-*users*-*process*.csv" %
                               os.path.join(argument_namespace.report_directory,
                                            argument_namespace.filename_prefix))

  per_nb_users_report_dict = {}
  for filename in filename_iter:
    report_dict = per_nb_users_report_dict.setdefault(
      int(user_re.search(filename).group(1)), {'filename': []})

    report_dict['filename'].append(filename)

  pdf = PdfPages(argument_namespace.output_filename)

  for nb_users, report_dict in per_nb_users_report_dict.items():
    stat_list = computeStatisticFromFilenameList(argument_namespace,
                                                 report_dict['filename'])

    title = "Ran suites with %d users" % len(report_dict['filename'])
    for slice_start_idx in range(0, len(stat_list), 12):
      if slice_start_idx != 0:
        title += ' (Ctd.)'

      drawBarDiagram(pdf, title,
                     stat_list[slice_start_idx:slice_start_idx + 12])

    report_dict['stats'] = stat_list

  if len(per_nb_users_report_dict) != 1:
    for i in range(len(report_dict['stats'])):
      stat_list = []
      nb_users_list = per_nb_users_report_dict.keys()

      for report_dict in per_nb_users_report_dict.values():
        stat_list.append(report_dict['stats'][i])

      drawConcurrentUsersPlot(pdf,
                              "%s from %d to %d users (step: %d)" %
                                (stat_list[0].full_label,
                                 nb_users_list[0],
                                 nb_users_list[-1],
                                 nb_users_list[1] - nb_users_list[0]),
                              nb_users_list,
                              stat_list)

  pdf.close()

if __name__ == '__main__':
  generateReport()
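
parseArguments() above, combined with the generate_erp5_tester_report console script declared in setup.py at the end of this commit, gives a small report generator. Assuming the package is installed with its benchmark-report extra, a typical invocation would be:

  $ generate_erp5_tester_report --filename-prefix result \
      --output-filename results.pdf report_directory/

It globs the per-process CSV files produced by the benchmark runs, draws one bar diagram per group of 12 statistics, and, when several user counts were tested, adds one per-label plot of response time against concurrent users.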
erp5/util/benchmark/result.py (new file, mode 100644)

##############################################################################
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
# [GPL v2 or later boilerplate identical to argument.py above]
##############################################################################

import sys
import math
import os
import csv
import logging
import signal

class BenchmarkResultStatistic(object):
  def __init__(self, suite, label):
    self.suite = suite
    self.label = label
    self.full_label = '%s: %s' % (self.suite, self.label)
    self.minimum = sys.maxint
    self.maximum = -1
    self.n = 0
    self.error_sum = 0

    # For calculating the mean
    self._value_sum = 0

    # For calculating the standard deviation
    self._variance_sum = 0
    self._mean = 0

  def add_error(self):
    self.error_sum += 1

  def add(self, value):
    if value < self.minimum:
      self.minimum = value
    if value > self.maximum:
      self.maximum = value

    self._value_sum += value
    self.n += 1

    delta = value - self._mean
    self._mean += delta / self.n
    self._variance_sum += delta * (value - self._mean)

  @property
  def mean(self):
    return self._value_sum / self.n

  @property
  def standard_deviation(self):
    return math.sqrt(self._variance_sum / self.n)

import abc

class BenchmarkResult(object):
  __metaclass__ = abc.ABCMeta

  def __init__(self, argument_namespace, nb_users, user_index):
    self._argument_namespace = argument_namespace
    self._nb_users = nb_users
    self._user_index = user_index

    self._log_level = self._argument_namespace.enable_debug and \
        logging.DEBUG or logging.INFO

    self._stat_list = []
    self._suite_idx = 0
    self._result_idx = 0
    self.result_list = []
    self._all_result_list = []
    self._first_iteration = True
    self._current_suite_name = None
    self._result_idx_checkpoint_list = []
    self.label_list = []
    self._logger = None

  def getLogger(self):
    if not self._logger:
      logging.basicConfig(stream=self.log_file, level=self._log_level)
      self._logger = logging.getLogger('erp5.util.benchmark')
      return self._logger

    return self._logger

  def __enter__(self):
    return self

  def enterSuite(self, name):
    self._current_suite_name = name

  def __call__(self, label, value):
    self.result_list.append(value)

    if self._first_iteration:
      self._stat_list.append(
        BenchmarkResultStatistic(self._current_suite_name, label))

    self._stat_list[self._result_idx].add(value)
    self._result_idx += 1

  def getLabelList(self):
    return [stat.full_label for stat in self._stat_list]

  def iterationFinished(self):
    self._all_result_list.append(self.result_list)
    if self._first_iteration:
      self.label_list = self.getLabelList()

    self.getLogger().debug("RESULTS: %s" % self.result_list)
    self.result_list = []
    self._first_iteration = False
    self._suite_idx = 0
    self._result_idx = 0

  def getStatList(self):
    return self._stat_list

  def getCurrentSuiteStatList(self):
    start_index = self._suite_idx and \
        self._result_idx_checkpoint_list[self._suite_idx - 1] or 0

    return self._stat_list[start_index:self._result_idx]

  def exitSuite(self):
    if self._first_iteration:
      self._result_idx_checkpoint_list.append(self._result_idx)
    else:
      expected_result_idx = self._result_idx_checkpoint_list[self._suite_idx]
      while self._result_idx != expected_result_idx:
        self.result_list.append(0)
        self._stat_list[self._result_idx].add_error()
        self._result_idx += 1

    self._suite_idx += 1

  @abc.abstractmethod
  def flush(self, partial=True):
    self._all_result_list = []

  @abc.abstractmethod
  def __exit__(self, exc_type, exc_value, traceback):
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    self.flush(partial=False)
    return True

class CSVBenchmarkResult(BenchmarkResult):
  def __init__(self, *args, **kwargs):
    super(CSVBenchmarkResult, self).__init__(*args, **kwargs)

    filename_prefix = self._getFilenamePrefix()

    self._result_filename = "%s.csv" % filename_prefix
    self._result_filename_path = os.path.join(
      self._argument_namespace.report_directory, self._result_filename)

    self._log_filename = "%s.log" % filename_prefix
    self._log_filename_path = os.path.join(
      self._argument_namespace.report_directory, self._log_filename)

    self.log_file = open(self._log_filename_path, 'w')

  def _getFilenamePrefix(self):
    max_nb_users = isinstance(self._argument_namespace.users, int) and \
        self._argument_namespace.users or self._argument_namespace.users[1]

    fmt = "%%s-%%drepeat-%%0%ddusers-process%%0%dd" % \
        (len(str(max_nb_users)), len(str(self._nb_users)))

    return fmt % (self._argument_namespace.filename_prefix,
                  self._argument_namespace.repeat,
                  self._nb_users,
                  self._user_index)

  def __enter__(self):
    self._result_file = open(self._result_filename_path, 'wb')
    self._csv_writer = csv.writer(self._result_file, delimiter=',',
                                  quoting=csv.QUOTE_MINIMAL)

    return self

  def flush(self, partial=True):
    if self._result_file.tell() == 0:
      self._csv_writer.writerow(self.label_list)

    self._csv_writer.writerows(self._all_result_list)
    self._result_file.flush()
    os.fsync(self._result_file.fileno())

    super(CSVBenchmarkResult, self).flush(partial)

  def __exit__(self, exc_type, exc_value, traceback):
    super(CSVBenchmarkResult, self).__exit__(exc_type, exc_value, traceback)
    self._result_file.close()

    if exc_type and not issubclass(exc_type, StopIteration):
      msg = "An error occurred, see: %s" % self._log_filename_path
      self.getLogger().error("%s: %s" % (exc_type, exc_value))
      raise RuntimeError(msg)

from cStringIO import StringIO

import xmlrpclib
import datetime

class ERP5BenchmarkResult(BenchmarkResult):
  def __init__(self, *args, **kwargs):
    self.log_file = StringIO()
    self._log_buffer_list = []

    super(ERP5BenchmarkResult, self).__init__(*args, **kwargs)

  def iterationFinished(self):
    super(ERP5BenchmarkResult, self).iterationFinished()

    # TODO: garbage?
    self._log_buffer_list.append(self.log_file.getvalue())
    self.log_file.seek(0)

  def flush(self, partial=True):
    benchmark_result = xmlrpclib.ServerProxy(
      self._argument_namespace.erp5_publish_url,
      verbose=True,
      allow_none=True)

    benchmark_result.BenchmarkResult_addResultLineList(
      self._argument_namespace.user_tuple[self._user_index][0],
      self._argument_namespace.repeat,
      self._nb_users,
      self._argument_namespace.benchmark_suite_name_list,
      self.getLabelList(),
      self._all_result_list,
      self._log_buffer_list)

    super(ERP5BenchmarkResult, self).flush()

  def __exit__(self, exc_type, exc_value, traceback):
    super(ERP5BenchmarkResult, self).__exit__(exc_type, exc_value, traceback)

  @staticmethod
  def createResultDocument(publish_url, publish_project, repeat, nb_users):
    test_result_module = xmlrpclib.ServerProxy(publish_url, verbose=True,
                                               allow_none=True)

    if isinstance(nb_users, tuple):
      nb_users_str = '%d to %d' % nb_users
    else:
      nb_users_str = '%d' % nb_users

    benchmark_result = test_result_module.TestResultModule_addBenchmarkResult(
      '%d repeat with %s concurrent users' % (repeat, nb_users_str),
      publish_project,
      ' '.join(sys.argv),
      datetime.datetime.now())

    return benchmark_result['id']

  @staticmethod
  def closeResultDocument(publish_document_url, error_message_set):
    result = xmlrpclib.ServerProxy(publish_document_url, verbose=True,
                                   allow_none=True)

    result.BenchmarkResult_completed(error_message_set and 'FAIL' or 'PASS',
                                     error_message_set)
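
BenchmarkResultStatistic.add() is an online (Welford-style) update: _mean and _variance_sum are maintained incrementally, so minimum, maximum, mean, and standard deviation cost O(1) memory per label without retaining the samples. A quick sanity check against the direct population formulas, using only the class above:

  stat = BenchmarkResultStatistic('suite', 'label')
  sample_list = [0.5, 1.2, 0.8, 2.0]
  for value in sample_list:
    stat.add(value)

  # Direct computation of the population mean and variance
  mean = sum(sample_list) / len(sample_list)
  variance = sum((v - mean) ** 2 for v in sample_list) / len(sample_list)

  assert abs(stat.mean - mean) < 1e-9
  assert abs(stat.standard_deviation - variance ** .5) < 1e-9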
erp5/util/benchmark/scalability_tester.py (new file, mode 100755)

#!/usr/bin/env python
##############################################################################
# Copyright (c) 2011 Nexedi SA and Contributors. All Rights Reserved.
# Arnaud Fontaine <arnaud.fontaine@nexedi.com>
# [GPL v2 or later boilerplate identical to argument.py above]
##############################################################################

from .result import CSVBenchmarkResult

class CSVScalabilityBenchmarkResult(CSVBenchmarkResult):
  def flush(self, partial=True):
    super(CSVScalabilityBenchmarkResult, self).flush(partial)

    self._argument_namespace.notify_method(self._result_filename,
                                           self._result_file.tell(),
                                           partial=partial)

from .performance_tester import PerformanceTester

class ScalabilityTester(PerformanceTester):
  def preRun(self, *args, **kwargs):
    pass

  def postRun(self, error_message_set):
    from logging import Formatter
    import sys
    import urllib
    import urllib2

    try:
      urllib2.urlopen("http://[%s]:%d/report" % \
                        (self._argument_namespace.manager_address,
                         self._argument_namespace.manager_port),
                      urllib.urlencode({'error_message_set':
                                          '|'.join(error_message_set)})).close()
    except:
      print >>sys.stderr, "ERROR: %s" % Formatter().formatException(
        sys.exc_info())

  def getResultClass(self):
    if not self._argument_namespace.erp5_publish_url:
      return CSVScalabilityBenchmarkResult

    return super(ScalabilityTester, self).getResultClass()

from slapos.tool.nosqltester import NoSQLTester

class RunScalabilityTester(NoSQLTester):
  def __init__(self):
    super(RunScalabilityTester, self).__init__()

  def _add_parser_arguments(self, parser):
    super(RunScalabilityTester, self)._add_parser_arguments(parser)
    ScalabilityTester._add_parser_arguments(parser)

  def _parse_arguments(self, parser):
    namespace = super(RunScalabilityTester, self)._parse_arguments(parser)
    ScalabilityTester._check_parsed_arguments(namespace)

    namespace.notify_method = self.send_result_availability_notification

    return namespace

  def run_tester(self):
    ScalabilityTester(self.argument_namespace).run()

def main():
  RunScalabilityTester().run()

if __name__ == '__main__':
  main()
erp5/util/test_browser/__init__.py (new file, mode 100644, empty)
erp5/util/test_browser/browser.py (new file, mode 100644)

[1046-line diff collapsed in the original view]
erp5/util/test_browser/examples/testAddPerson.py (new file, mode 100755)

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from erp5.util.test_browser.browser import Browser

ITERATION = 20

def benchmarkAddPerson(iteration_counter, result_dict):
  """
  Benchmark adding a person.
  """
  # Create a browser instance
  browser = Browser('http://localhost:18080/', 'erp5',
                    username='zope', password='zope')

  # Open ERP5 homepage
  browser.open()

  # Go to Persons module (person_module)
  browser.mainForm.submitSelectModule(value='/person_module')

  # Create a new person and record the time elapsed in seconds
  result_dict.setdefault('Create', []).append(browser.mainForm.submitNew())

  # Check whether it has been successfully created
  assert browser.getTransitionMessage() == 'Object created.'

  # Fill the first and last name of the newly created person
  browser.mainForm.getControl(name='field_my_first_name').value = 'Foo%d' % \
      iteration_counter
  browser.mainForm.getControl(name='field_my_last_name').value = 'Bar%d' % \
      iteration_counter

  # Submit the changes, record the time elapsed in seconds
  result_dict.setdefault('Save', []).append(browser.mainForm.submitSave())

  # Check whether the changes have been successfully updated
  assert browser.getTransitionMessage() == 'Data updated.'

  # Validate the person (as the workflow action may not be available yet, try
  # 5 times and sleep 5s between each attempt before failing) and record
  # time spent on confirmation
  browser.mainForm.submitSelectWorkflow(value='validate_action',
                                        maximum_attempt_number=5,
                                        sleep_between_attempt=5)

  result_dict.setdefault('Validate', []).append(
    browser.mainForm.submitDialogConfirm())

  # Check whether it has been successfully validated
  assert browser.getTransitionMessage() == 'Status changed.'

  ## Go to the new person from the Persons module, showing how to use
  ## listbox API
  # Go to Persons module first (person_module)
  browser.mainForm.submitSelectModule(value='/person_module')

  # Select all the persons whose Usual Name starts with Foo
  browser.mainForm.getListboxControl(2, 2).value = 'Foo%'
  result_dict.setdefault('Filter', []).append(browser.mainForm.submit())

  # Get the line number
  line_number = browser.getListboxPosition("Foo%(counter)d Bar%(counter)d" % \
                                             {'counter': iteration_counter},
                                           column_number=2)

  # From the column and line_number, we can now get the Link instance
  link = browser.getListboxLink(line_number=line_number,
                                column_number=2)

  # Click on the link
  link.click()

  assert browser.mainForm.getControl(name='field_my_first_name').value == \
      'Foo%d' % iteration_counter

if __name__ == '__main__':
  # Run benchmarkAddPerson ITERATION times and compute the average time it
  # took for each operation
  result_dict = {}

  counter = 0
  while counter != ITERATION:
    benchmarkAddPerson(counter, result_dict)
    counter += 1

  for title, time_list in result_dict.iteritems():
    print "%s: %.4fs" % (title, float(sum(time_list)) / ITERATION)
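
The example is self-contained: given an ERP5 instance listening on localhost:18080 that accepts the hard-coded zope/zope credentials, running

  $ python testAddPerson.py

executes the scenario ITERATION (20) times and prints the average seconds spent on each labelled operation.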
setup.py (modified)

@@ -39,6 +39,10 @@ setup(name=name,
       ],
       extras_require={
         'testnode': ['slapos.core', 'xml_marshaller'],
+        'test_browser': ['zope.testbrowser >= 3.11.1', 'z3c.etestbrowser'],
+        'benchmark': [name + '[test_browser]'],
+        'benchmark-report': [name + '[benchmark]', 'matplotlib', 'numpy'],
+        'scalability_tester': [name + '[benchmark]', 'slapos.tool.nosqltester'],
       },
       zip_safe=True,
       packages=find_packages(),
@@ -46,6 +50,9 @@ setup(name=name,
       entry_points={
         'console_scripts': [
           'testnode = erp5.util.testnode:main [testnode]',
+          'performance_tester_erp5 = erp5.util.benchmark.performance_tester:main [benchmark]',
+          'scalability_tester_erp5 = erp5.util.benchmark.scalability_tester:main [scalability_tester]',
+          'generate_erp5_tester_report = erp5.util.benchmark.report:generateReport [benchmark-report]',
         ],
       }
)
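
The extras chain onto one another: benchmark pulls in name + '[test_browser]', and benchmark-report adds matplotlib and numpy on top of benchmark. Assuming the distribution name is erp5.util (as the CHANGES file suggests), installing the complete reporting toolchain would look like:

  $ easy_install 'erp5.util[benchmark-report]'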