Kirill Smelkov / misc / Commits / b77b5f81

Commit b77b5f81, authored Apr 26, 2016 by Elvis Pranskevichus

    Add support for Unix socket testing, JSON output of benchmarks, selective running

Parent: e7fe9dc8

Showing 6 changed files with 344 additions and 119 deletions (+344 -119)
.dockerignore    +1   -0
Dockerfile       +3   -1
echo_client      +81  -39
entrypoint       +4   -2
run_benchmarks   +232 -77
torecho.py       +23  -0
.dockerignore

 .cache
+sockets
Dockerfile

@@ -13,18 +13,20 @@ RUN mkdir -p /usr/local/python-venvs
 RUN DEBIAN_FRONTEND=noninteractive \
         apt-get update && apt-get install -y \
             autoconf automake libtool build-essential \
-            python3 python3-pip git nodejs
+            python3 python3-pip git nodejs gosu

 RUN pip3 install vex
 RUN vex --python=python3.5 -m bench pip install -U pip
 RUN mkdir -p /var/lib/cache/pip

 ADD http_server.py /tmp/http_server.py
+ADD torecho.py /tmp/torecho.py
 ADD requirements.txt /tmp/requirements.txt

 EXPOSE 25000
 VOLUME /var/lib/cache
+VOLUME /tmp/sockets

 ENTRYPOINT ["/entrypoint"]
tcp_client → echo_client

@@ -6,6 +6,8 @@
 import argparse
 from concurrent import futures
+import json
+import math
 import socket
 import time
@@ -26,6 +28,8 @@ if __name__ == '__main__':
                         help='socket timeout in seconds')
     parser.add_argument('--addr', default='127.0.0.1:25000', type=str,
                         help='server address')
+    parser.add_argument('--output-format', default='text', type=str,
+                        help='output format', choices=['text', 'json'])
     args = parser.parse_args()

     unix = False
@@ -49,11 +53,13 @@ if __name__ == '__main__':
         else:
             sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

-        sock.settimeout(5)
+        sock.settimeout(15)
         sock.connect(addr)

         n = 0
-        latency_stats = np.zeros((timeout * 10,))
+        latency_stats = np.zeros((timeout * 100,))
+        min_latency = float('inf')
+        max_latency = 0.0

         while time.monotonic() - start < duration:
             req_start = time.monotonic()
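The AF_UNIX branch that pairs with the else above, and the address parsing that sets unix/addr, are outside this hunk. A minimal sketch (not part of the commit) of how a client could choose between a Unix socket and TCP; the 'file:' address prefix used here is only an assumed convention:

# Hypothetical sketch: pick AF_UNIX vs AF_INET from an address string.
import socket

addr_arg = 'file:/tmp/sockets/server.sock'      # e.g. vs. '127.0.0.1:25000'
if addr_arg.startswith('file:'):
    unix, addr = True, addr_arg[len('file:'):]  # filesystem path to the socket
else:
    host, _, port = addr_arg.partition(':')
    unix, addr = False, (host, int(port))

family = socket.AF_UNIX if unix else socket.AF_INET
sock = socket.socket(family, socket.SOCK_STREAM)
sock.settimeout(15)
sock.connect(addr)                               # fails unless a server is listening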
@@ -64,49 +70,42 @@ if __name__ == '__main__':
                 if not resp:
                     raise SystemExit()
                 nrecv += len(resp)
-            req_time = round((time.monotonic() - req_start) * 10000)
+            req_time = round((time.monotonic() - req_start) * 100000)
+            if req_time > max_latency:
+                max_latency = req_time
+            if req_time < min_latency:
+                min_latency = req_time
             latency_stats[req_time] += 1
             n += 1

-        return n, latency_stats
+        return n, latency_stats, min_latency, max_latency

-    def weighted_quantile(values, quantiles, sample_weight=None,
-                          values_sorted=False, old_style=False):
+    def weighted_quantile(values, quantiles, weights):
         """ Very close to np.percentile, but supports weights.

-        NOTE: quantiles should be in [0, 1]!
-
         :param values: np.array with data
-        :param quantiles: array-like with many quantiles needed
-        :param sample_weight: array-like of the same length as `array`
-        :param values_sorted: bool, if True, then will avoid sorting of initial array
-        :param old_style: if True, will correct output to be consistent with np.percentile.
+        :param quantiles: array-like with many quantiles needed,
+                          quantiles should be in [0, 1]!
+        :param weights: array-like of the same length as `array`
         :return: np.array with computed quantiles.
         """
         values = np.array(values)
         quantiles = np.array(quantiles)
-        if sample_weight is None:
-            sample_weight = np.ones(len(values))
-        sample_weight = np.array(sample_weight)
+        weights = np.array(weights)
         assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
             'quantiles should be in [0, 1]'

-        if not values_sorted:
-            sorter = np.argsort(values)
-            values = values[sorter]
-            sample_weight = sample_weight[sorter]
-
-        weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
-        if old_style:
-            # To be convenient with np.percentile
-            weighted_quantiles -= weighted_quantiles[0]
-            weighted_quantiles /= weighted_quantiles[-1]
-        else:
-            weighted_quantiles /= np.sum(sample_weight)
+        weighted_quantiles = np.cumsum(weights) - 0.5 * weights
+        weighted_quantiles /= np.sum(weights)

         return np.interp(quantiles, weighted_quantiles, values)

     TIMES = args.times
     N = args.concurrency
     DURATION = args.duration
+    min_latency = float('inf')
+    max_latency = 0.0
     messages = 0
     latency_stats = None
     start = time.monotonic()
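Since latency_stats is a histogram (one count per latency bin), the bin indices serve as the values and the counts as the weights. A small standalone illustration (not part of the commit) of the weights-only weighted_quantile and of the weighted mean/std used in the aggregation hunk below:

# Toy example on a 5-bin latency histogram.
import math
import numpy as np

latency_stats = np.array([0, 3, 5, 1, 1])   # counts per latency bin
arange = np.arange(len(latency_stats))      # bin indices 0..4

# Weighted mean / std / CV over the histogram.
mean = np.average(arange, weights=latency_stats)
std = math.sqrt(np.average((arange - mean) ** 2, weights=latency_stats))
print('mean', mean, 'std', std, 'cv', std / mean)

# Weights-only weighted_quantile: cumulative-weight midpoints, then interp.
q = np.array([0.5, 0.9])
wq = np.cumsum(latency_stats) - 0.5 * latency_stats
wq = wq / np.sum(latency_stats)
print('weighted quantiles', np.interp(q, wq, arange))

# Close (not identical) to np.percentile on the expanded samples.
print('np.percentile     ', np.percentile(np.repeat(arange, latency_stats), [50, 90]))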
@@ -118,35 +117,78 @@ if __name__ == '__main__':
     res = futures.wait(fs)

     for fut in res.done:
-        t_messages, t_latency_stats = fut.result()
+        t_messages, t_latency_stats, t_min_latency, t_max_latency = \
+            fut.result()
         messages += t_messages
         if latency_stats is None:
             latency_stats = t_latency_stats
         else:
             latency_stats = np.add(latency_stats, t_latency_stats)
+        if t_max_latency > max_latency:
+            max_latency = t_max_latency
+        if t_min_latency < min_latency:
+            min_latency = t_min_latency

     end = time.monotonic()
     duration = end - start

     arange = np.arange(len(latency_stats))
-    stddev = np.std(arange)
-    weighted_latency = np.multiply(latency_stats, arange)
-    mean_latency = np.sum(weighted_latency) / messages
+    mean_latency = np.average(arange, weights=latency_stats)
+    variance = np.average((arange - mean_latency) ** 2, weights=latency_stats)
+    latency_std = math.sqrt(variance)
+    latency_cv = latency_std / mean_latency

     percentiles = [50, 75, 90, 99]
     percentile_data = []
     quantiles = weighted_quantile(arange, [p / 100 for p in percentiles],
-                                  sample_weight=latency_stats,
-                                  values_sorted=True)
+                                  weights=latency_stats)

     for i, percentile in enumerate(percentiles):
-        percentile_data.append('{}%: {}ms'.format(
-            percentile, round(quantiles[i], 2)))
-
-    print(messages, 'in', round(duration, 2))
-    print('Latency avg: {}ms'.format(round(mean_latency, 2)))
-    print('Latency distribution: {}'.format('; '.join(percentile_data)))
-    print('Requests/sec: {}'.format(round(messages / duration, 2)))
-    transfer = (messages * MSGSIZE / (1024 * 1024)) / duration
-    print('Transfer/sec: {}MiB'.format(round(transfer, 2)))
+        percentile_data.append((percentile, round(quantiles[i] / 100, 3)))
+
+    data = dict(
+        messages=messages,
+        transfer=round((messages * MSGSIZE / (1024 * 1024)) / DURATION, 2),
+        rps=round(messages / DURATION, 2),
+        latency_min=round(min_latency / 100, 3),
+        latency_mean=round(mean_latency / 100, 3),
+        latency_max=round(max_latency / 100, 3),
+        latency_std=round(latency_std / 100, 3),
+        latency_cv=round(latency_cv * 100, 2),
+        latency_percentiles=percentile_data)
+
+    if args.output_format == 'json':
+        data['latency_percentiles'] = json.dumps(percentile_data)
+        output = '''\
+{{
+  "messages": {messages},
+  "transfer": {transfer},
+  "rps": {rps},
+  "latency_min": {latency_min},
+  "latency_mean": {latency_mean},
+  "latency_max": {latency_max},
+  "latency_std": {latency_std},
+  "latency_cv": {latency_cv},
+  "latency_percentiles": {latency_percentiles}
+}}'''.format(**data)
+    else:
+        data['latency_percentiles'] = '; '.join(
+            '{}% under {}ms'.format(*v) for v in percentile_data)
+        output = '''\
+{messages} {size}KiB messages in {duration} seconds
+Latency: min {latency_min}ms; max {latency_max}ms; mean {latency_mean}ms; \
+std: {latency_std}ms ({latency_cv}%)
+Latency distribtion: {latency_percentiles}
+Requests/sec: {rps}
+Transfer/sec: {transfer}MiB
+'''.format(duration=DURATION, size=round(MSGSIZE / 1024, 2), **data)
+
+    print(output)
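The JSON report produced above can be consumed directly by whatever drives the benchmark run. A sketch (not part of the commit) of parsing a captured report; the field names come from the template above, while the numbers are purely illustrative:

# Parse a report captured from `--output-format json`.
import json

raw = '''{
  "messages": 100000,
  "transfer": 12.5,
  "rps": 3333.33,
  "latency_min": 0.1,
  "latency_mean": 0.3,
  "latency_max": 15.0,
  "latency_std": 0.2,
  "latency_cv": 66.67,
  "latency_percentiles": [[50, 0.25], [75, 0.3], [90, 0.4], [99, 1.2]]
}'''                                   # illustrative values only, not real results

report = json.loads(raw)
p99 = dict(report['latency_percentiles'])[99]
print(report['rps'], 'req/s, p99 =', p99, 'ms')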
entrypoint

@@ -10,5 +10,7 @@ git clone https://github.com/dabeaz/curio.git
 git clone https://github.com/MagicStack/uvloop.git
 cp /tmp/http_server.py uvloop/examples/bench/

-echo "Running server on port 25000..."
-"$@"
+UID=${UID:-0}
+GID=${GID:-0}
+
+gosu ${UID}:${GID} "$@"
run_benchmarks

(diff collapsed; not shown on this page)
torecho.py (new file, 0 → 100644)

from tornado.ioloop import IOLoop
from tornado.tcpserver import TCPServer


class StreamHandler:
    def __init__(self, stream):
        self._stream = stream
        self._stream.read_until_close(None, self._handle_read)

    def _handle_read(self, data):
        self._stream.write(data)


class EchoServer(TCPServer):
    def handle_stream(self, stream, address):
        StreamHandler(stream)


if __name__ == '__main__':
    server = EchoServer()
    server.bind(25000)
    server.start(1)
    IOLoop.instance().start()
    IOLoop.instance().close()
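A quick way to exercise the Tornado echo server, assuming it is running locally on its default port 25000 (this smoke test is not part of the commit):

# Minimal smoke test: send one line and expect it echoed back.
import socket

with socket.create_connection(('127.0.0.1', 25000), timeout=5) as sock:
    sock.sendall(b'hello\n')
    data = sock.recv(1024)
    print('echoed back:', data)    # expected: b'hello\n'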