Commit 7f22bba6 authored by Kirill Smelkov

X zwrk: New tool to simulate parallel load from multiple clients

Similar to what wrk does for HTTP.

Rationale: simulating multiple clients with separate processes:

1. is noisy - the timings change from run to run, sometimes by up to 50%;
2. has significant additional overhead - there are constant OS-level
   process switches between the client processes, and this prevents
   actually creating the load;
3. and the overhead from "2" takes resources away from the server in the
   localhost case.

So let's switch to simulating many requests in a lightweight way, similarly
to how it is done in wrk: in one process with not so many threads (it can
be just 1), many connections opened to the server, and an epoll-style way
to load it, with Go providing the epoll-goroutine matching.
parent c86ba1b0
/log
/var
/zhash
/zhash_go
/tcpu
/tcpu_go
/tzodb
/tzodb_go
/ioping.tmp
@@ -432,7 +432,7 @@ GENfs() {
# remember correct hash to later check in benchmarks
# crc32:1552c530 ; oid=0..2127 nread=8534126 t=0.033s (15.7μs / object) x=zhash.py
zhash.py --$zhashfunc $fs1/data.fs |awk '{print $1}' >$ds/zhash.ok
tzodb.py zhash --$zhashfunc $fs1/data.fs |awk '{print $1}' >$ds/zhash.ok
}
# generate data in sqlite
@@ -860,8 +860,9 @@ cpustat() {
return $ret
}
Nrun=5 # repeat benchmarks N time
Npar=16 # run so many parallel clients in parallel phase
Nrun=5 # repeat benchmarks N time
Nparv="1 2 4 8 16" # run parallel zwrk benchmarks with so many clients (XXX +6, +12 ?)
#Npar=16 # run so many parallel clients in parallel phase
#profile=
profile=cpustat
@@ -874,6 +875,7 @@ nrun() {
}
# nrunpar ... - run $Npar ... instances in parallel and wait for completion
# XXX running processes in parallel is deprecated in favour of zwrk.
nrunpar() {
$profile _nrunpar "$@"
}
@@ -1047,9 +1049,10 @@ zbench() {
zhashok=$3
# nrun time demo-zbigarray read $url
nrun zhash.py --check=$zhashok --bench=$topic/%s --$zhashfunc $url
echo -e "\n# ${Npar} clients in parallel"
nrunpar zhash.py --check=$zhashok --bench=$topic/%s-P$Npar --$zhashfunc $url
nrun tzodb.py zhash --check=$zhashok --bench=$topic/%s --$zhashfunc $url
# XXX running processes in parallel is deprecated in favour of zwrk.
# echo -e "\n# ${Npar} clients in parallel"
# nrunpar tzodb.py zhash --check=$zhashok --bench=$topic/%s-P$Npar --$zhashfunc $url
echo
zbench_go $url $topic $zhashok
}
@@ -1059,11 +1062,17 @@ zbench_go() {
url=$1
topic=$2
zhashok=$3
nrun zhash_go -check=$zhashok --bench=$topic/%s --log_dir=$log -$zhashfunc $url
nrun zhash_go -check=$zhashok --bench=$topic/%s --log_dir=$log -$zhashfunc -useprefetch $url
nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s -$zhashfunc $url
nrun tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s -$zhashfunc -useprefetch $url
echo -e "\n# ${Npar} clients in parallel"
nrunpar zhash_go -check=$zhashok --bench=$topic/%s-P$Npar --log_dir=$log -$zhashfunc $url
# XXX running processes in parallel is deprecated in favour of zwrk.
# echo -e "\n# ${Npar} clients in parallel"
# nrunpar tzodb_go -log_dir=$log zhash -check=$zhashok -bench=$topic/%s-P$Npar -$zhashfunc $url
for i in ${Nparv}; do
echo -e "\n# $i clients in parallel"
nrun tzodb_go -log_dir=$log zwrk -nclient $i -check=$zhashok -bench=$topic/%s -$zhashfunc $url
done
}
@@ -1402,15 +1411,15 @@ cpustat)
;;
esac
# make sure zhash*, tcpu* and zgenprod are on PATH (because we could be invoked from another dir)
# make sure tzodb*, tcpu* and zgenprod are on PATH (because we could be invoked from another dir)
X=$(cd `dirname $0` && pwd)
export PATH=$X:$PATH
# rebuild go bits
# neo/py, wendelin.core, ... - must be pip install'ed - `neotest deploy` cares about that
go install -v lab.nexedi.com/kirr/neo/go/...
go build -o $X/zhash_go $X/zhash.go
#go build -race -o $X/zhash_go $X/zhash.go
go build -o $X/tzodb_go $X/tzodb.go
#go build -race -o $X/tzodb_go $X/tzodb.go
go build -o $X/tcpu_go $X/tcpu.go
# setup network & fs environment
@@ -69,6 +69,8 @@ func prettyarg(arg string) string {
// benchit runs the benchmark for benchf
func benchit(benchname string, bencharg string, benchf func(*testing.B, string)) {
// FIXME testing.Benchmark does not allow to detect whether benchmark failed.
// (use log.Fatal, not {t,b}.Fatal as workaround)
r := testing.Benchmark(func (b *testing.B) {
benchf(b, bencharg)
})
@@ -86,7 +88,7 @@ func benchit(benchname string, bencharg string, benchf func(*testing.B, string))
func benchHash(b *testing.B, h hash.Hash, arg string) {
blksize, err := strconv.Atoi(arg)
if err != nil {
b.Fatal(err)
log.Fatal(err)
}
data := make([]byte, blksize)
@@ -101,23 +103,23 @@ func BenchmarkAdler32(b *testing.B, arg string) { benchHash(b, adler32.New(), ar
func BenchmarkCrc32(b *testing.B, arg string) { benchHash(b, crc32.NewIEEE(), arg) }
func BenchmarkSha1(b *testing.B, arg string) { benchHash(b, sha1.New(), arg) }
func xreadfile(t testing.TB, path string) []byte {
func xreadfile(path string) []byte {
data, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
log.Fatal(err)
}
return data
}
func BenchmarkUnzlib(b *testing.B, zfile string) {
zdata := xreadfile(b, fmt.Sprintf("testdata/zlib/%s", zfile))
zdata := xreadfile(fmt.Sprintf("testdata/zlib/%s", zfile))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := xzlib.Decompress(zdata)
if err != nil {
b.Fatal(err)
log.Fatal(err)
}
}
}
@@ -24,7 +24,7 @@ from __future__ import print_function
import sys
import hashlib
import zhash
import tzodb
import zlib
from time import time
from math import ceil, log10
@@ -125,8 +125,8 @@ def _bench_hasher(b, h, blksize):
i += 1
def bench_adler32(b, blksize): _bench_hasher(b, zhash.Adler32Hasher(), blksize)
def bench_crc32(b, blksize): _bench_hasher(b, zhash.CRC32Hasher(), blksize)
def bench_adler32(b, blksize): _bench_hasher(b, tzodb.Adler32Hasher(), blksize)
def bench_crc32(b, blksize): _bench_hasher(b, tzodb.CRC32Hasher(), blksize)
def bench_sha1(b, blksize): _bench_hasher(b, hashlib.sha1(), blksize)
This diff is collapsed.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
# Copyright (C) 2017-2018 Nexedi SA and Contributors.
# Kirill Smelkov <kirr@nexedi.com>
#
# This program is free software: you can Use, Study, Modify and Redistribute
# it under the terms of the GNU General Public License version 3, or (at your
@@ -18,7 +18,7 @@
#
# See COPYING file for full licensing terms.
# See https://www.nexedi.com/licensing for rationale and options.
"""zhash - compute hash of whole latest objects stream in a ZODB database"""
"""tzodb - ZODB-related benchmarks"""
from __future__ import print_function
@@ -81,7 +81,7 @@ hashRegistry = {
def usage(w):
print(\
"""Usage: zhash [options] url
"""Usage: tzodb zhash [options] url
options:
@@ -97,9 +97,14 @@ options:
--bench=<topic> use benchmarking format for output
""", file=w)
def main():
def zhash():
"""zhash - compute hash of whole latest objects stream in a ZODB database"""
if len(sys.argv) < 2 or sys.argv[1] != "zhash":
usage(sys.stderr)
exit(1)
try:
optv, argv = getopt(sys.argv[1:], "h", ["help", "check=", "bench="] + hashRegistry.keys())
optv, argv = getopt(sys.argv[2:], "h", ["help", "check=", "bench="] + hashRegistry.keys())
except GetoptError as e:
print("E: %s" % e, file=sys.stderr)
usage(sys.stderr)
@@ -164,7 +169,7 @@ def main():
x = "zhash.py"
hresult = "%s:%s" % (h.name, h.hexdigest())
if bench is None:
print('%s ; oid=0..%d nread=%d t=%.3fs (%.1fμs / object) x=%s' % \
print('%s ; oid=0..%d nread=%d t=%.3fs (%.1fµs / object) x=%s' % \
(hresult, oid-1, nread, dt, dt * 1E6 / oid, x))
else:
topic = bench % x
@@ -175,5 +180,9 @@ def main():
print("%s: hash mismatch: expected %s ; got %s\t# x=%s" % (url, check, hresult, x), file=sys.stderr)
sys.exit(1)
def main():
zhash() # XXX stub
if __name__ == '__main__':
main()
@@ -32,6 +32,7 @@ import (
// OpenOptions describes options for OpenStorage
type OpenOptions struct {
ReadOnly bool // whether to open storage as read-only
NoCache bool // don't use cache for read/write operations
}
// DriverOpener is a function to open a storage driver
@@ -69,6 +70,7 @@ func OpenStorage(ctx context.Context, storageURL string, opt *OpenOptions) (ISto
// XXX commonly handle some options from url -> opt?
// (e.g. ?readonly=1 -> opt.ReadOnly=true + remove ?readonly=1 from URL)
// ----//---- nocache
opener, ok := driverRegistry[u.Scheme]
if !ok {
@@ -80,12 +82,16 @@ func OpenStorage(ctx context.Context, storageURL string, opt *OpenOptions) (ISto
return nil, err
}
return &storage{
IStorageDriver: storDriver,
var cache *Cache
if !opt.NoCache {
// small cache so that prefetch can work for loading
// XXX 512K hardcoded (= ~ 128 · 4K-entries)
l1cache: NewCache(storDriver, 128 * 4*1024),
cache = NewCache(storDriver, 128 * 4*1024)
}
return &storage{
IStorageDriver: storDriver,
l1cache: cache,
}, nil
}
@@ -97,7 +103,7 @@ func OpenStorage(ctx context.Context, storageURL string, opt *OpenOptions) (ISto
// and other storage-independed higher-level functionality.
type storage struct {
IStorageDriver
l1cache *Cache
l1cache *Cache // can be =nil, if opened with NoCache
}
@@ -106,9 +112,15 @@ type storage struct {
func (s *storage) Load(ctx context.Context, xid Xid) (*mem.Buf, Tid, error) {
// XXX here: offload xid validation from cache and driver ?
// XXX here: offload wrapping err -> OpError{"load", err} ?
return s.l1cache.Load(ctx, xid)
if s.l1cache != nil {
return s.l1cache.Load(ctx, xid)
} else {
return s.IStorageDriver.Load(ctx, xid)
}
}
func (s *storage) Prefetch(ctx context.Context, xid Xid) {
s.l1cache.Prefetch(ctx, xid)
if s.l1cache != nil {
s.l1cache.Prefetch(ctx, xid)
}
}
@@ -179,6 +179,7 @@ type IStorage interface {
// started, to complete.
//
// Prefetch does not return any error.
// Prefetch is noop if storage was opened with NoCache option.
Prefetch(ctx context.Context, xid Xid)
}