Commit 80f75eb1 authored by Elvis Pranskevichus

Add nodejs test, latency stats

parent fd1704ae
@@ -11,9 +11,9 @@ ENV WORKON_HOME /usr/local/python-venvs
 RUN mkdir -p /usr/local/python-venvs
 RUN DEBIAN_FRONTEND=noninteractive \
-        apt-get install -y \
+        apt-get update && apt-get install -y \
         autoconf automake libtool build-essential \
-        python3 python3-pip git
+        python3 python3-pip git nodejs
 RUN pip3 install vex
 RUN vex --python=python3.5 -m bench pip install -U pip
@@ -11,4 +11,4 @@ git clone https://github.com/MagicStack/uvloop.git
 cp /tmp/http_server.py uvloop/examples/bench/
 echo "Running server on port 25000..."
-vex bench python "$@"
+"$@"
#!/usr/bin/env python3
import argparse
import multiprocessing
import os.path
import socket
import subprocess
@@ -13,7 +15,6 @@ _dir = os.path.dirname(__file__)
 _cache = os.path.abspath(os.path.join(_dir, '.cache'))
 http_client = "wrk --latency -d 30 -c 200 -t 4 http://127.0.0.1:25000/{msize}"
-tcp_client = "./tcp_client --addr=127.0.0.1:25000 --workers=4 --msize={msize}"
 http_server = "uvloop/examples/bench/http_server.py --addr='0.0.0.0:25000'"
@@ -21,39 +22,76 @@ server_base = ['docker', 'run', '--rm', '-t', '-p', '25000:25000',
                '-v', '{_cache}:/var/lib/cache'.format(_cache=_cache),
                '--name', 'magicbench', 'magic/benchmark']

+python = ['vex', 'bench', 'python']
+nodejs = ['nodejs']

 benchmarks = [{
     'title': 'TCP echo server (asyncio)',
-    'server': ['uvloop/examples/bench/server.py', '--addr=0.0.0.0:25000',
+    'server': python + ['uvloop/examples/bench/server.py',
+               '--addr=0.0.0.0:25000',
                '--streams'],
-    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--workers=4'],
-    'warmup': ['--msize=1024', '--workers=4', '--duration=5'],
+    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--concurrency=4'],
+    'warmup': ['--msize=1024', '--concurrency=4', '--duration=5'],
     'variations': [{
         'title': '1kb messages, concurrency 4',
         'args': ['--msize=1024', '--concurrency=4']
     }, {
         'title': '10kb messages, concurrency 4',
         'args': ['--msize=10240', '--concurrency=4']
     }, {
         'title': '100kb messages, concurrency 4',
         'args': ['--msize=102400', '--concurrency=4']
     }]
 }, {
+    'title': 'TCP echo server (gevent)',
+    'server': python + ['curio/examples/bench/gevecho.py'],
+    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--concurrency=4'],
+    'warmup': ['--msize=1024', '--concurrency=4', '--duration=5'],
+    'variations': [{
+        'title': '1kb messages, concurrency 4',
+        'args': ['--msize=1024', '--concurrency=4']
+    }, {
+        'title': '10kb messages, concurrency 4',
+        'args': ['--msize=10240', '--concurrency=4']
+    }, {
+        'title': '100kb messages, concurrency 4',
+        'args': ['--msize=102400', '--concurrency=4']
+    }]
+}, {
+    'title': 'TCP echo server (nodejs)',
+    'server': nodejs + ['curio/examples/bench/nodeecho.js'],
+    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--concurrency=4'],
+    'warmup': ['--msize=1024', '--concurrency=4', '--duration=5'],
+    'variations': [{
         'title': '1kb messages, concurrency 4',
-        'args': ['--msize=1024', '--workers=4', '--duration=30']
+        'args': ['--msize=1024', '--concurrency=4']
     }, {
         'title': '10kb messages, concurrency 4',
-        'args': ['--msize=10240', '--workers=4', '--duration=30']
+        'args': ['--msize=10240', '--concurrency=4']
     }, {
         'title': '100kb messages, concurrency 4',
-        'args': ['--msize=102400', '--workers=4', '--duration=30']
+        'args': ['--msize=102400', '--concurrency=4']
     }]
 }, {
     'title': 'TCP echo server (uvloop)',
-    'server': ['uvloop/examples/bench/server.py', '--addr=0.0.0.0:25000',
+    'server': python + ['uvloop/examples/bench/server.py',
+               '--addr=0.0.0.0:25000',
                '--streams', '--uvloop'],
-    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--workers=4'],
-    'warmup': ['--msize=1024', '--workers=4', '--duration=5'],
+    'client': ['./tcp_client', '--addr=127.0.0.1:25000', '--concurrency=4'],
+    'warmup': ['--msize=1024', '--concurrency=4', '--duration=5'],
     'variations': [{
         'title': '1kb messages, concurrency 4',
-        'args': ['--msize=1024', '--workers=4', '--duration=30']
+        'args': ['--msize=1024', '--concurrency=4']
     }, {
         'title': '10kb messages, concurrency 4',
-        'args': ['--msize=10240', '--workers=4', '--duration=30']
+        'args': ['--msize=10240', '--concurrency=4']
     }, {
         'title': '100kb messages, concurrency 4',
-        'args': ['--msize=102400', '--workers=4', '--duration=30']
+        'args': ['--msize=102400', '--concurrency=4']
     }]
 }]
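
Read together with the runner changes in the hunks below, each benchmark entry expands into concrete command lines. A minimal sketch of that expansion (assumed from the fragments visible in this diff; the actual glue code lives in unchanged parts of the file):

# Hypothetical expansion of the first entry, assuming the runner
# prepends server_base (the docker invocation) to 'server' and appends
# the variation args plus a --duration flag to 'client'.
benchmark = benchmarks[0]

server_cmd = server_base + benchmark['server']
# -> docker run ... magic/benchmark vex bench python \
#        uvloop/examples/bench/server.py --addr=0.0.0.0:25000 --streams
# The container entrypoint now execs its arguments verbatim (the "$@"
# change above), which is why the interpreter prefix moved into the
# python/nodejs lists here.

warmup_cmd = benchmark['client'] + benchmark['warmup']
# -> ./tcp_client --addr=127.0.0.1:25000 --concurrency=4 \
#        --msize=1024 --concurrency=4 --duration=5

for variation in benchmark['variations']:
    client_cmd = benchmark['client'] + variation['args'] + ['--duration=30']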
@@ -63,7 +101,7 @@ def abort(msg):
     sys.exit(1)

-def start_and_wait_for_server(server_cmd, timeout=10):
+def start_and_wait_for_server(server_cmd, timeout=60):
     server = subprocess.Popen(server_cmd, universal_newlines=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -83,7 +121,7 @@ def start_and_wait_for_server(server_cmd, timeout=10):
         if server.returncode is not None:
             abort('Could not start server\n' +
                   '----------------------\n' +
-                  server.communicate()[1])
+                  '\n\n'.join(server.communicate()))
         else:
             sock.close()
             return server
@@ -92,7 +130,7 @@ def start_and_wait_for_server(server_cmd, timeout=10):
     abort('Could not start server\n' +
           '----------------------\n' +
-          server.communicate()[1])
+          '\n\n'.join(server.communicate()))
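
Only fragments of start_and_wait_for_server are visible above. The overall pattern is: launch the server process, then poll the TCP port until it accepts a connection, aborting if the process exits or the timeout (raised to 60 seconds here) elapses. A self-contained sketch of that pattern, reconstructed under those assumptions rather than the file's exact code:

import socket
import subprocess
import time

def start_and_wait_for_server(server_cmd, addr=('127.0.0.1', 25000),
                              timeout=60):
    server = subprocess.Popen(server_cmd, universal_newlines=True,
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    start = time.monotonic()
    while time.monotonic() - start < timeout:
        # If the process died, surface both stdout and stderr, as the
        # '\n\n'.join(server.communicate()) change above now does.
        if server.poll() is not None:
            raise RuntimeError('could not start server:\n' +
                               '\n\n'.join(server.communicate()))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(addr)
            return server          # the port is accepting connections
        except OSError:
            time.sleep(0.5)        # not listening yet; retry
        finally:
            sock.close()
    server.terminate()
    raise RuntimeError('server did not start within {} seconds'.format(timeout))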

@@ -100,9 +138,20 @@ def kill_server(server):
     print('Shutting down server...')
     subprocess.check_output(['docker', 'stop', 'magicbench'])
     server.wait()

+    ret = subprocess.call(['docker', 'inspect', '--type=container',
+                           'magicbench'],
+                          stdout=subprocess.DEVNULL,
+                          stderr=subprocess.DEVNULL)
+    if ret == 0:
+        subprocess.check_output(['docker', 'rm', 'magicbench'])
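
The added cleanup relies on `docker inspect` exiting non-zero when the named container does not exist, so its return code doubles as an existence probe before `docker rm`. The same idea as a reusable helper (a sketch, not part of the commit):

import subprocess

def remove_container_if_exists(name):
    # `docker inspect` exits with a non-zero code when no such container
    # exists, which makes it a cheap existence check.
    exists = subprocess.call(['docker', 'inspect', '--type=container', name],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL) == 0
    if exists:
        # The container has already been stopped at this point, so a
        # plain `docker rm` suffices.
        subprocess.check_output(['docker', 'rm', name])

remove_container_if_exists('magicbench')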

def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--duration', '-T', default=30, type=int,
+                        help='duration of each benchmark in seconds')
+    args = parser.parse_args()

     for benchmark in benchmarks:
         print(benchmark['title'])
         print('=' * len(benchmark['title']))
@@ -118,12 +167,17 @@ def main():
         warmup_cmd = benchmark['client'] + benchmark['warmup']
         print(' '.join(warmup_cmd))
         subprocess.check_output(warmup_cmd)
+        print()

+        duration = args.duration

         try:
             for variation in benchmark['variations']:
-                print(variation['title'])
-                print('-' * len(variation['title']))
+                title = 'BENCHMARK: {}'.format(variation['title'])
+                print(title)
+                print('-' * len(title))
                 client_cmd = benchmark['client'] + variation['args']
+                client_cmd += ['--duration={}'.format(duration)]
                 print(' '.join(client_cmd))
                 subprocess.check_call(client_cmd)
                 print()
@@ -3,12 +3,14 @@
 # Copied with minimal modifications from curio
 # https://github.com/dabeaz/curio

-from concurrent import futures
+import argparse
+from concurrent import futures
 import socket
 import time

+import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
@@ -18,10 +20,12 @@ if __name__ == '__main__':
                         help='duration of test in seconds')
     parser.add_argument('--times', default=1, type=int,
                         help='number of times to run the test')
-    parser.add_argument('--workers', default=3, type=int,
-                        help='number of workers')
+    parser.add_argument('--concurrency', default=3, type=int,
+                        help='request concurrency')
+    parser.add_argument('--timeout', default=2, type=int,
+                        help='socket timeout in seconds')
     parser.add_argument('--addr', default='127.0.0.1:25000', type=str,
-                        help='number of workers')
+                        help='server address')
args = parser.parse_args()
unix = False
@@ -37,16 +41,22 @@ if __name__ == '__main__':
     msg = b'x' * MSGSIZE
+    timeout = args.timeout * 1000

     def run_test(start, duration):
         if unix:
             sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
         else:
             sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         sock.settimeout(5)
         sock.connect(addr)

         n = 0
+        latency_stats = np.zeros((timeout,))

         while time.monotonic() - start < duration:
+            req_start = time.monotonic()
             sock.sendall(msg)
             nrecv = 0
             while nrecv < MSGSIZE:
@@ -54,15 +64,18 @@ if __name__ == '__main__':
                 if not resp:
                     raise SystemExit()
                 nrecv += len(resp)
+            # Round-trip time in whole milliseconds, clamped to the last
+            # bucket so a pathologically slow request cannot overflow
+            # the histogram.
+            req_time = min(round((time.monotonic() - req_start) * 1000),
+                           timeout - 1)
+            latency_stats[req_time] += 1
             n += 1
-        return n
+        return n, latency_stats
     TIMES = args.times
-    N = args.workers
+    N = args.concurrency
     DURATION = args.duration

     messages = 0
+    latency_stats = None
     start = time.monotonic()
     for _ in range(TIMES):
         with futures.ProcessPoolExecutor(max_workers=N) as e:
@@ -72,12 +85,36 @@ if __name__ == '__main__':
             res = futures.wait(fs)

             for fut in res.done:
-                messages += fut.result()
+                t_messages, t_latency_stats = fut.result()
+                messages += t_messages
+                if latency_stats is None:
+                    latency_stats = t_latency_stats
+                else:
+                    latency_stats = np.add(latency_stats, t_latency_stats)

     end = time.monotonic()
     duration = end - start

+    weighted_latency = np.multiply(latency_stats, np.arange(timeout))
+    mean_latency = np.sum(weighted_latency) / messages
+
+    # Weighted percentiles: the smallest latency bucket at which the
+    # cumulative request count reaches the given fraction of all requests.
+    percentiles = [50, 75, 90, 99]
+    percentile_data = []
+    cumulative_count = np.cumsum(latency_stats)
+    for percentile in percentiles:
+        threshold = cumulative_count[-1] * percentile / 100
+        percentile_data.append('{}%: {}ms'.format(
+            percentile, int(np.searchsorted(cumulative_count, threshold))))
+
     print(messages, 'in', round(duration, 2))
-    print(round(messages / duration, 2), 'requests/sec')
-    throughput = (messages * MSGSIZE / (1024 * 1024)) / duration
-    print(round(throughput, 2), 'MiB/sec')
+    print('Latency avg: {}ms'.format(round(mean_latency, 2)))
+    print('Latency distribution: {}'.format('; '.join(percentile_data)))
+    print('Requests/sec: {}'.format(round(messages / duration, 2)))
+    transfer = (messages * MSGSIZE / (1024 * 1024)) / duration
+    print('Transfer/sec: {}MiB'.format(round(transfer, 2)))
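
As a sanity check on the histogram arithmetic above, a standalone toy example with made-up counts, using the same weighted mean and cumulative-count percentile technique:

import numpy as np

# Hypothetical histogram: index = latency in ms, value = request count.
latency_stats = np.zeros(10)
latency_stats[1] = 6    # six requests took ~1ms
latency_stats[3] = 3    # three requests took ~3ms
latency_stats[9] = 1    # one request took ~9ms

messages = latency_stats.sum()                             # 10 requests
mean = (latency_stats * np.arange(10)).sum() / messages    # (6 + 9 + 9) / 10 = 2.4ms

cumulative = np.cumsum(latency_stats)                      # [0 6 6 9 9 9 9 9 9 10]
p99 = np.searchsorted(cumulative, messages * 99 / 100)     # first bucket >= 9.9
print(mean, p99)                                           # 2.4 9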