Commit e0d875bc authored by Kirill Smelkov

X neotest: Teach it to benchmark NEO with storage partitioned to several nodes

The default is still 1 partition. To be tested in the field. Benchplot is yet
to be updated for the new benchmark namings.
parent 70c63882
@@ -346,9 +346,18 @@ func (p *Node) dial(ctx context.Context) (_ *neonet.NodeLink, err error) {
     case accept.YourUUID != app.MyInfo.UUID:
         err = fmt.Errorf("connected, but peer gives us uuid %v (our is %v)", accept.YourUUID, app.MyInfo.UUID)

+    // XXX Node.Dial is currently used by Client only.
+    // XXX For Client it would not be correct to check #partitions only at
+    // XXX connection time - it always has to be re-checked afterwards, as
+    // XXX every operation could coincide with cluster reconfiguration.
+    //
+    // FIXME for now we simply don't check N(p)
+    //
     // XXX NumReplicas: neo/py meaning for n(replica) = `n(real-replica) - 1`
+    /*
     case !(accept.NumPartitions == 1 && accept.NumReplicas == 0):
         err = fmt.Errorf("connected but TODO peer works with !1x1 partition table.")
+    */
     }

     if err != nil {
...
@@ -189,9 +189,13 @@ init_net() {
     Mbind=[$myaddr]:5552    # NEO master
     Zbind=[$myaddr]:5553    # ZEO
-    # NEO storage. bind not strictly needed but this way we also make sure
-    # no 2 storages are started at the same time.
-    Sbind=[$myaddr]:5554
 }
+
+# Sbind n - return bind address for storage #n  (n=1...)
+Sbind() {
+    # for NEO storage. bind not strictly needed but this way we also make sure
+    # no 2 storages are started at the same time.
+    echo "[$myaddr]:$((5554 + $1 - 1))"
+}

 # init_fs - do initial disk allocations
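
As an illustration (editor's sketch, not part of the patch): Sbind now maps
storage numbers to consecutive ports starting at 5554; myaddr below is a
stand-in value, since init_net derives the real one.

# sketch: Sbind gives storage #n its own port, starting at 5554
myaddr=127.0.0.1    # assumption: init_net sets this to the node's address

Sbind() {
    echo "[$myaddr]:$((5554 + $1 - 1))"
}

Sbind 1    # [127.0.0.1]:5554
Sbind 2    # [127.0.0.1]:5555
Sbind 3    # [127.0.0.1]:5556
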
@@ -215,12 +219,39 @@ switch_dataset() {
     ds=$var/$ds
     fs1=$ds/fs1; mkdir -p $fs1    # FileStorage (and so ZEO and NEO/go) data
-    neolite=$ds/neo.sqlite        # NEO/py: sqlite
-    neosql=$ds/neo.sql; mkdir -p $neosql    # NEO/py: mariadb
-    mycnf=$neosql/mariadb.cnf     # NEO/py: mariadb config
-    mysock=$(realpath $neosql)/my.sock    # NEO/py: mariadb socket
 }

+Pneo=1    # N(partition) for NEO (= N(S) nodes to run)
+# XXX switch_neo_P  (like switch_dataset)
+
+# neolite n - NEO: sqlite for partition n
+neolite() {
+    echo $ds/neo.sqlite·P$Pneo-$1
+}
+
+# neosql n - NEO: mariadb directory for partition n
+neosql() {
+    local d=$ds/neo.sql·P$Pneo-$1; mkdir -p $d
+    echo $d
+}
+
+# mycnf n - NEO: mariadb config for partition n
+mycnf() {
+    echo `neosql $1`/mariadb.cnf
+}
+
+# mysock n - NEO: mariadb socket for partition n
+mysock() {
+    echo $(realpath `neosql $1`)/my.sock
+}
+
+# mylog n - NEO: mariadb log for partition n
+mylog() {
+    echo $log/mdb.log·P$Pneo-$1
+}
+
 # foreach_dataset <command> - run command once for each dataset serially
 foreach_dataset() {
     for d in "${datasetv[@]}" ; do
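
For illustration (editor's sketch): with these helpers every partition gets
its own database path, keyed by both Pneo and the partition number, so
databases for different partition counts can live side by side. Paths below
are stand-ins; switch_dataset sets the real ds and log.

# sketch: per-partition paths produced by the new helpers (Pneo=2)
ds=/tmp/ds; log=/tmp/log; Pneo=2

neolite() { echo $ds/neo.sqlite·P$Pneo-$1; }
mylog()   { echo $log/mdb.log·P$Pneo-$1; }

neolite 1    # /tmp/ds/neo.sqlite·P2-1
neolite 2    # /tmp/ds/neo.sqlite·P2-2
mylog 2      # /tmp/log/mdb.log·P2-2
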
@@ -229,14 +260,16 @@ foreach_dataset() {
     done
 }

-# control started NEO cluster
+# xneoctl ... - control started NEO cluster
 xneoctl() {
     neoctl -a $Abind "$@"
 }

-# control started MariaDB
+# xmysql ... - control all started MariaDB servers
 xmysql() {
-    mysql --defaults-file=$mycnf "$@"
+    for i in `seq $Pneo`; do
+        mysql --defaults-file=`mycnf $i` "$@"
+    done
 }

 # if we are abnormally terminating
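
Since there is now one MariaDB server per storage, xmysql fans every
statement out to all of them. A hand-unrolled sketch of what a call runs
under Pneo=2:

# sketch: what `xmysql -e "SHUTDOWN"` executes for Pneo=2
mysql --defaults-file=`mycnf 1` -e "SHUTDOWN"
mysql --defaults-file=`mycnf 2` -e "SHUTDOWN"
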
@@ -263,13 +296,15 @@ neopy_log() {
     echo --logfile=$log/$1.log
 }

-# M{py,go} ... - spawn master
+# M{py,go} n ... - spawn master with N(partition) = n
 Mpy() {
-    # --autostart=1
+    local n=$1
+    shift
+
     # XXX neo/py meaning for --replicas is `n(real-replica) - 1`
     exec -a Mpy \
         neomaster --cluster=$neocluster --bind=$Mbind --masters=$Mbind \
-        --replicas 0 --partitions 1 `neopy_log Mpy` "$@" &
+        --replicas 0 --partitions $n `neopy_log Mpy` "$@" &
 }

 Mgo() {
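
Mpy's first argument is now the partition count. A usage sketch with
illustrative values: a master serving 4 partitions that autostarts once 4
storage nodes have joined.

# sketch: spawn master for 4 partitions, autostart at 4 storage nodes
Mpy 4 --autostart=4
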
@@ -277,20 +312,27 @@ Mgo() {
     neo --log_dir=$log master -cluster=$neocluster -bind=$Mbind "$@" &
 }

-# Spy ... - spawn NEO/py storage
+# Spy <n> ... - spawn NEO/py storage handling partition #n
 Spy() {
+    local n=$1
+    shift
+
     # --adapter=...
     # --database=...
     # --engine=...
     exec -a Spy \
-        neostorage --cluster=$neocluster --bind=$Sbind --masters=$Mbind `neopy_log Spy` "$@" &
+        neostorage --cluster=$neocluster --bind=`Sbind $n` --masters=$Mbind `neopy_log Spy` "$@" &
 }

-# Sgo <data.fs> - spawn NEO/go storage
+# Sgo <n> - spawn NEO/go storage
 Sgo() {
+    local n=$1
+    shift
+    test $n = 1 || die "Sgo: TODO ·P<n>"
+
     # -alsologtostderr
     exec -a Sgo \
-        neo -log_dir=$log storage -cluster=$neocluster -bind=$Sbind -masters=$Mbind "$@" &
+        neo -log_dir=$log storage -cluster=$neocluster -bind=`Sbind $n` -masters=$Mbind "$@" &
 }

 # Apy ... - spawn NEO/py admin
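
Correspondingly, each storage is spawned with its partition number and binds
its own port via Sbind. A hand-unrolled sketch for Pneo=2 with the SQLite
backend:

# sketch: one NEO/py storage per partition (Pneo=2, SQLite backend)
Spy 1 --adapter=SQLite --database=`neolite 1`    # binds `Sbind 1` = [$myaddr]:5554
Spy 2 --adapter=SQLite --database=`neolite 2`    # binds `Sbind 2` = [$myaddr]:5555
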
@@ -310,45 +352,53 @@ Zpy() {
 # spawn NEO/go cluster (Sgo+Mpy+Apy) working on data.fs
 NEOgofs1() {
-    Mpy --autostart=1
-    Sgo fs1://$fs1/data.fs
+    Mpy 1 --autostart=1
+    Sgo 1 fs1://$fs1/data.fs
     Apy
 }

 # spawn NEO/go cluster (Sgo+Mpy+Apy) working on sqlite db
 NEOgolite() {
-    Mpy --autostart=1
-    Sgo sqlite://$neolite
+    test $Pneo == 1 || die "NEOgolite: Sgo not ready to handle ·P<n>"
+    Mpy 1 --autostart=1
+    Sgo 1 sqlite://`neolite 1`
     Apy
 }

 # spawn NEO/py cluster working on sqlite db
 NEOpylite() {
-    Mpy --autostart=1
-    Spy --adapter=SQLite --database=$neolite
+    Mpy $Pneo --autostart=$Pneo
+    for i in `seq $Pneo`; do
+        Spy $i --adapter=SQLite --database=`neolite $i`
+    done
     Apy
 }

 # spawn NEO/py cluster working on mariadb
 NEOpysql() {
-    MDB
+    for i in `seq $Pneo`; do
+        MDB $i
+    done
     sleep 1    # XXX fragile
     xmysql -e "CREATE DATABASE IF NOT EXISTS neo"

-    Mpy --autostart=1
-    Spy --adapter=MySQL --engine=InnoDB --database=root@neo$mysock
+    Mpy $Pneo --autostart=$Pneo
+    for i in `seq $Pneo`; do
+        Spy $i --adapter=MySQL --engine=InnoDB --database=root@neo`mysock $i`
+    done
     Apy
 }

-# setup/spawn mariadb
+# MDB <n> - setup/spawn mariadb for partition n
 MDB() {
-    cat >$mycnf <<EOF
+    local n=$1
+    cat >`mycnf $n` <<EOF
 [mysqld]
 skip_networking
-socket = $mysock
-datadir = $neosql/data
-log_error = $log/mdb.log
+socket = `mysock $n`
+datadir = `neosql $n`/data
+log_error = `mylog $n`

 # the following comes from
 # https://lab.nexedi.com/nexedi/slapos/blob/bd197876/software/neoppod/my.cnf.in#L18
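
Putting it together, a hand-unrolled sketch of the processes NEOpysql now
brings up for Pneo=2 (values illustrative):

# sketch: NEOpysql expanded for Pneo=2
MDB 1; MDB 2                                     # one MariaDB server per partition
sleep 1                                          # XXX fragile
xmysql -e "CREATE DATABASE IF NOT EXISTS neo"    # runs against both servers
Mpy 2 --autostart=2                              # master with 2 partitions
Spy 1 --adapter=MySQL --engine=InnoDB --database=root@neo`mysock 1`
Spy 2 --adapter=MySQL --engine=InnoDB --database=root@neo`mysock 2`
Apy
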
@@ -387,19 +437,19 @@ character_set_server = utf8
 skip_character_set_client_handshake

 [client]
-socket = $mysock
+socket = `mysock $n`
 user = root
 EOF

     # setup system tables on first run
-    if ! test -e $neosql/data ; then
+    if ! test -e `neosql $n`/data ; then
         # XXX --cross-bootstrap only to avoid final large print notice
         # XXX but cross-bootstrap filters out current host name from installed tables - is it ok?
         mysql_bin=$(dirname `which mysql`)    # XXX under slapos mysql_install_db cannot find its base automatically
         mysql_install_db --basedir=${mysql_bin%/bin} --defaults-file=`mycnf $n` --cross-bootstrap
-    fi
+    fi

-    mysqld --defaults-file=$mycnf &
+    mysqld --defaults-file=`mycnf $n` &
 }

 # ---- generate test data ----
@@ -441,7 +491,7 @@ GENfs() {
 # generate data in sqlite
 GENsqlite() {
-    test -e $ds/generated.sqlite && return
+    test -e $ds/generated.sqlite·P$Pneo && return

     echo -e '\n*** generating sqlite data...'
     NEOpylite
     # NOTE compression is disabled because when benchmarking server latency
@@ -450,12 +500,12 @@ GENsqlite() {
     xneoctl set cluster stopping
     wait    # XXX fragile - won't work if there are children spawned outside
     sync
-    touch $ds/generated.sqlite
+    touch $ds/generated.sqlite·P$Pneo
 }

 # generate data in mariadb
 GENsql() {
-    test -e $ds/generated.sql && return
+    test -e $ds/generated.sql·P$Pneo && return

     echo -e '\n*** generating sql data...'
     NEOpysql
     # NOTE compression is disabled - see ^^^ (sqlite) for rationale.
@@ -465,7 +515,7 @@ GENsql() {
     xmysql -e "SHUTDOWN"
     wait    # XXX fragile
     sync
-    touch $ds/generated.sql
+    touch $ds/generated.sql·P$Pneo
 }

 # generate all test databases
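
The generated-data markers carry the same ·P suffix, so databases generated
for different partition counts can coexist in one dataset directory. A quick
check sketch (stand-in paths):

# sketch: marker files are now per-P
ds=/tmp/ds; Pneo=2
test -e $ds/generated.sqlite·P$Pneo \
    && echo "sqlite data for P$Pneo already generated" \
    || echo "sqlite data for P$Pneo needs generating"
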
@@ -1151,21 +1201,21 @@ zbench_local() {
     # XXX save time - we show only neo/py(!log)/sqlite
     #echo -e "\n*** NEO/py sqlite"
     #NEOpylite
-    #zbench neo://$neocluster@$Mbind neo/py/sqlite $zhashok
+    #zbench neo://$neocluster@$Mbind neo/py/sqlite·P$Pneo $zhashok
     #xneoctl set cluster stopping
     #wait

     # XXX JM asked to also have NEO/py with logging disabled
     echo -e "\n*** NEO/py sqlite (logging disabled)"
     X_NEOPY_LOG_SKIP=y NEOpylite
-    zbench neo://$neocluster@$Mbind "neo/py(!log)/sqlite" $zhashok
+    zbench neo://$neocluster@$Mbind "neo/py(!log)/sqlite·P$Pneo" $zhashok
     xneoctl set cluster stopping
     wait

     # XXX save time - we show only neo/py(!log)/sql
     #echo -e "\n*** NEO/py sql"
     #NEOpysql
-    #zbench neo://$neocluster@$Mbind neo/py/sql $zhashok
+    #zbench neo://$neocluster@$Mbind neo/py/sql·P$Pneo $zhashok
     #xneoctl set cluster stopping
     #xmysql -e "SHUTDOWN"
     #wait
@@ -1173,34 +1223,42 @@
     # XXX JM asked to also have NEO/py with logging disabled
     echo -e "\n*** NEO/py sql (logging disabled)"
     X_NEOPY_LOG_SKIP=y NEOpysql
-    zbench neo://$neocluster@$Mbind "neo/py(!log)/sql" $zhashok
+    zbench neo://$neocluster@$Mbind "neo/py(!log)/sql·P$Pneo" $zhashok
     xneoctl set cluster stopping
     xmysql -e "SHUTDOWN"
     wait

     echo -e "\n*** NEO/go fs1"
     NEOgofs1
-    zbench neo://$neocluster@$Mbind neo/go/fs1 $zhashok
+    zbench neo://$neocluster@$Mbind neo/go/fs1·P1 $zhashok
     xneoctl set cluster stopping
     wait

     echo -e "\n*** NEO/go fs1 (sha1 disabled on: storage, client)"
     X_NEOGO_SHA1_SKIP=y NEOgofs1
-    X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/fs1(!sha1)" $zhashok
+    X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/fs1(!sha1)·P1" $zhashok
     xneoctl set cluster stopping
     wait

     echo -e "\n*** NEO/go sqlite"
-    NEOgolite
-    zbench neo://$neocluster@$Mbind neo/go/sqlite $zhashok
-    xneoctl set cluster stopping
-    wait
+    if [ $Pneo == 1 ]; then
+        NEOgolite
+        zbench neo://$neocluster@$Mbind neo/go/sqlite·P$Pneo $zhashok
+        xneoctl set cluster stopping
+        wait
+    else
+        echo "# skipping  (NEO/go sqlite not yet ready to handle ·P<n>)"
+    fi

     echo -e "\n*** NEO/go sqlite (sha1 disabled on: client)"
-    NEOgolite
-    X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/sqlite" $zhashok
-    xneoctl set cluster stopping
-    wait
+    if [ $Pneo == 1 ]; then
+        NEOgolite
+        X_NEOGO_SHA1_SKIP=y zbench_go neo://$neocluster@$Mbind "neo/go/sqlite·P$Pneo" $zhashok
+        xneoctl set cluster stopping
+        wait
+    else
+        echo "# skipping  (NEO/go sqlite not yet ready to handle ·P<n>)"
+    fi
 }

 # command: benchmark when server runs locally and client is on another node
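
With the new namings every benchmark series is tagged by its partition count,
which is what the commit message means by benchplot needing an update. An
illustrative sketch of the series names a Pneo=3 run would emit:

# sketch: benchmark series names emitted for Pneo=3
#   neo/py(!log)/sqlite·P3
#   neo/py(!log)/sql·P3
#   neo/go/fs1·P1    # NEO/go still runs a single storage for now
#   (neo/go/sqlite is skipped - not yet ready for P>1)
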
@@ -1264,21 +1322,21 @@ zbench_cluster() {
     # XXX save time - we show only neo/py(!log)/sqlite
     #echo -e "\n*** NEO/py sqlite"
     #NEOpylite
-    #on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sqlite $zhashok
+    #on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sqlite·P$Pneo $zhashok
     #xneoctl set cluster stopping
     #wait

     # XXX JM asked to also have NEO/py with logging disabled
     echo -e "\n*** NEO/py sqlite (logging disabled)"
     X_NEOPY_LOG_SKIP=y NEOpylite
-    on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sqlite\\\"" $zhashok
+    on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sqlite\\\"·P$Pneo" $zhashok
     xneoctl set cluster stopping
     wait

     # XXX save time - we show only neo/py(!log)/sql
     #echo -e "\n*** NEO/py sql"
     #NEOpysql
-    #on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sql $zhashok
+    #on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/py/sql·P$Pneo $zhashok
     #xneoctl set cluster stopping
     #xmysql -e "SHUTDOWN"
     #wait

@@ -1286,34 +1344,42 @@
     # XXX JM asked to also have NEO/py with logging disabled
     echo -e "\n*** NEO/py sql (logging disabled)"
     X_NEOPY_LOG_SKIP=y NEOpysql
-    on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sql\\\"" $zhashok
+    on $url ./neotest zbench-client neo://$neocluster@$Mbind "\\\"neo/py(!log)/sql\\\"·P$Pneo" $zhashok
     xneoctl set cluster stopping
     xmysql -e "SHUTDOWN"
     wait

     echo -e "\n*** NEO/go fs"
     NEOgofs1
-    on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/fs1 $zhashok
+    on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/fs1·P1 $zhashok
     xneoctl set cluster stopping
     wait

     echo -e "\n*** NEO/go fs1 (sha1 disabled on: storage, client)"
     X_NEOGO_SHA1_SKIP=y NEOgofs1
-    on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/fs1(!sha1)\\\"" $zhashok
+    on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/fs1(!sha1)\\\"·P1" $zhashok
     xneoctl set cluster stopping
     wait

     echo -e "\n*** NEO/go sqlite"
-    NEOgolite
-    on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/sqlite $zhashok
-    xneoctl set cluster stopping
-    wait
+    if [ $Pneo == 1 ]; then
+        NEOgolite
+        on $url ./neotest zbench-client neo://$neocluster@$Mbind neo/go/sqlite·P$Pneo $zhashok
+        xneoctl set cluster stopping
+        wait
+    else
+        echo "# skipping  (NEO/go sqlite not yet ready to handle ·P<n>)"
+    fi

     echo -e "\n*** NEO/go sqlite (sha1 disabled on: client)"
-    NEOgolite
-    on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/sqlite\\\"" $zhashok
-    xneoctl set cluster stopping
-    wait
+    if [ $Pneo == 1 ]; then
+        NEOgolite
+        on $url X_NEOGO_SHA1_SKIP=y ./neotest zbench-client --goonly neo://$neocluster@$Mbind "\\\"neo/go/sqlite\\\"·P$Pneo" $zhashok
+        xneoctl set cluster stopping
+        wait
+    else
+        echo "# skipping  (NEO/go sqlite not yet ready to handle ·P<n>)"
+    fi
 }

 # command: benchmark client workload against separate server
...