Commit 294df8e7, authored Feb 13, 2007 by tomas@poseidon.mysql.com
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1-new-ndb
into poseidon.mysql.com:/home/tomas/mysql-5.1-new-ndb
Parents: 7de79a2d fe0d7ee4

Showing 24 changed files with 2849 additions and 607 deletions (+2849 -607)
storage/ndb/src/cw/cpcd/Makefile.am                        +1   -1
storage/ndb/src/mgmsrv/InitConfigFileParser.cpp            +1   -1
storage/ndb/test/run-test/Makefile.am                      +14  -8
storage/ndb/test/run-test/atrt-gather-result.sh            +1   -1
storage/ndb/test/run-test/atrt.hpp                         +161 -0
storage/ndb/test/run-test/autotest-boot.sh                 +165 -0
storage/ndb/test/run-test/autotest-run.sh                  +269 -0
storage/ndb/test/run-test/conf-daily-basic-ndb08.txt       +0   -19
storage/ndb/test/run-test/conf-daily-devel-ndb08.txt       +0   -19
storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt   +0   -22
storage/ndb/test/run-test/conf-daily-sql-ndb08.txt         +0   -20
storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt     +0   -20
storage/ndb/test/run-test/conf-dl145a.cnf                  +23  -0
storage/ndb/test/run-test/conf-dl145a.txt                  +0   -22
storage/ndb/test/run-test/conf-ndbmaster.cnf               +23  -0
storage/ndb/test/run-test/conf-ndbmaster.txt               +0   -22
storage/ndb/test/run-test/conf-repl.cnf                    +28  -0
storage/ndb/test/run-test/conf-shark.txt                   +0   -22
storage/ndb/test/run-test/example-my.cnf                   +116 -0
storage/ndb/test/run-test/files.cpp                        +383 -0
storage/ndb/test/run-test/main.cpp                         +674 -430
storage/ndb/test/run-test/setup.cpp                        +965 -0
storage/ndb/test/run-test/test-tests.txt                   +24  -0
storage/ndb/test/tools/Makefile.am                         +1   -0
storage/ndb/src/cw/cpcd/Makefile.am
@@ -26,7 +26,7 @@ LDADD_LOC = \
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_util.mk.am
 
-ndb_cpcd_LDFLAGS = @ndb_bin_am_ldflags@
+ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@
 
 # Don't update the files from bitkeeper
 %::SCCS/s.%
storage/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -837,7 +837,7 @@ InitConfigFileParser::parse_mycnf()
   opt.arg_type = REQUIRED_ARG;
   options.push_back(opt);
 
-  opt.name = "api";
+  opt.name = "ndbapi";
   opt.id = 256;
   opt.value = (gptr*)malloc(sizeof(char*));
   opt.var_type = GET_STR;
storage/ndb/test/run-test/Makefile.am
@@ -18,20 +18,18 @@ testdir=$(prefix)/mysql-test/ndb
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_util.mk.am
 include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapitools.mk.am
 
 test_PROGRAMS = atrt
-test_DATA = daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
-            conf-daily-basic-ndb08.txt \
-            conf-daily-devel-ndb08.txt \
-            conf-daily-sql-ndb08.txt \
-            conf-ndbmaster.txt \
-            conf-shark.txt \
-            conf-dl145a.txt
+test_DATA = daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
+            conf-ndbmaster.cnf \
+            conf-dl145a.cnf test-tests.txt
 
-test_SCRIPTS = atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
-               atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
+test_SCRIPTS = atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
+               atrt-clear-result.sh autotest-run.sh
 
-atrt_SOURCES = main.cpp run-test.hpp
+atrt_SOURCES = main.cpp setup.cpp files.cpp
 INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/test/include
 LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
             $(top_builddir)/storage/ndb/src/libndbclient.la \
@@ -39,6 +37,14 @@ LDADD_LOC = $(top_builddir)/storage/ndb/test/src/libNDBT.a \
             $(top_builddir)/mysys/libmysys.a \
             $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
 
+atrt_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \
+                -I$(top_srcdir)/ndb/src/mgmsrv \
+                -I$(top_srcdir)/ndb/include/mgmcommon \
+                -DMYSQLCLUSTERDIR="\"\"" \
+                -DDEFAULT_PREFIX="\"$(prefix)\""
+
 atrt_LDFLAGS = -static @ndb_bin_am_ldflags@
 
 wrappersdir=$(prefix)/bin
 wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
storage/ndb/test/run-test/atrt-gather-result.sh
@@ -8,7 +8,7 @@ rm -rf *
 
 while [ $# -gt 0 ]
 do
-  rsync -a "$1" .
+  rsync -a --exclude='ndb_*_fs/*' "$1" .
   shift
 done
storage/ndb/test/run-test/run-test.hpp → storage/ndb/test/run-test/atrt.hpp
@@ -2,7 +2,8 @@
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; version 2 of the License.
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
 
    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -16,68 +17,112 @@
 #ifndef atrt_config_hpp
 #define atrt_config_hpp
 
-#include <getarg.h>
+#include <ndb_global.h>
 #include <Vector.hpp>
 #include <BaseString.hpp>
 #include <Logger.hpp>
 #include <mgmapi.h>
 #include <CpcClient.hpp>
+#include <Properties.hpp>
+
+#undef MYSQL_CLIENT
 
-enum ErrorCodes {
+enum ErrorCodes 
+{
   ERR_OK = 0,
   ERR_NDB_FAILED = 101,
   ERR_SERVERS_FAILED = 102,
   ERR_MAX_TIME_ELAPSED = 103
 };
 
-struct atrt_host {
+struct atrt_host 
+{
   size_t m_index;
   BaseString m_user;
-  BaseString m_base_dir;
+  BaseString m_basedir;
   BaseString m_hostname;
   SimpleCpcClient * m_cpcd;
   Vector<struct atrt_process*> m_processes;
 };
 
+struct atrt_options
+{
+  enum Feature {
+    AO_REPLICATION = 1,
+    AO_NDBCLUSTER = 2
+  };
+
+  int m_features;
+  Properties m_loaded;
+  Properties m_generated;
+};
+
-struct atrt_process {
+struct atrt_process 
+{
   size_t m_index;
-  BaseString m_hostname;
   struct atrt_host * m_host;
+  struct atrt_cluster * m_cluster;
 
   enum Type {
-    ALL = 255,
-    NDB_DB = 1,
-    NDB_API = 2,
-    NDB_MGM = 4,
-    NDB_REP = 8,
-    MYSQL_SERVER = 16,
-    MYSQL_CLIENT = 32
+    AP_ALL = 255,
+    AP_NDBD = 1,
+    AP_NDB_API = 2,
+    AP_NDB_MGMD = 4,
+    AP_MYSQLD = 16,
+    AP_CLIENT = 32,
+    AP_CLUSTER = 256 // Used for options parsing for "cluster" options
   } m_type;
 
   SimpleCpcClient::Process m_proc;
-  short m_ndb_mgm_port;
   NdbMgmHandle m_ndb_mgm_handle;   // if type == ndb_mgm
+  atrt_process * m_mysqld;         // if type == client
+  atrt_process * m_rep_src;        // if type == mysqld
+  Vector<atrt_process*> m_rep_dst; // if type == mysqld
+  atrt_options m_options;
 };
 
+struct atrt_cluster
+{
+  BaseString m_name;
+  BaseString m_dir;
+  Vector<atrt_process*> m_processes;
+  atrt_options m_options;
+};
+
-struct atrt_config {
+struct atrt_config 
+{
   bool m_generated;
+  BaseString m_key;
+  BaseString m_replication;
-  Vector<atrt_host> m_hosts;
-  Vector<atrt_process> m_processes;
+  Vector<atrt_host*> m_hosts;
+  Vector<atrt_cluster*> m_clusters;
+  Vector<atrt_process*> m_processes;
 };
 
-struct atrt_testcase {
+struct atrt_testcase 
+{
   bool m_report;
   bool m_run_all;
   time_t m_max_time;
-  BaseString m_command;
-  BaseString m_args;
+  BaseString m_name;
 };
 
-extern Logger g_logger;
-
-bool parse_args(int argc, const char ** argv);
+void require(bool x);
+bool parse_args(int argc, char** argv);
 bool setup_config(atrt_config&);
+bool configure(atrt_config&, int setup);
+bool setup_directories(atrt_config&, int setup);
+bool setup_files(atrt_config&, int setup, int sshx);
+bool deploy(atrt_config&);
+bool sshx(atrt_config&, unsigned procmask);
+bool start(atrt_config&, unsigned procmask);
+bool remove_dir(const char *, bool incl = true);
 bool connect_hosts(atrt_config&);
 bool connect_ndb_mgm(atrt_config&);
 bool wait_ndb(atrt_config&, int ndb_mgm_node_status);
@@ -92,4 +137,25 @@ bool setup_test_case(atrt_config&, const atrt_testcase&);
 
 bool setup_hosts(atrt_config&);
 
+/**
+ * Global variables...
+ */
+extern Logger g_logger;
+extern atrt_config g_config;
+extern const char * g_cwd;
+extern const char * g_my_cnf;
+extern const char * g_user;
+extern const char * g_basedir;
+extern const char * g_prefix;
+extern int g_baseport;
+extern int g_fqpn;
+extern int g_default_ports;
+extern const char * g_clusters;
+
+extern const char * save_file;
+extern const char * save_group_suffix;
+extern char * save_extra_file;
+
 #endif
storage/ndb/test/run-test/autotest-boot.sh (new file)

#!/bin/sh
#############################################################
# This script created by Jonas does the following           #
# Cleans up clones and pevious builds, pulls new clones,    #
# builds, deploys, configures the tests and launches ATRT   #
#############################################################

###############
#Script setup #
##############

save_args=$*
VERSION="autotest-boot.sh version 1.00"

DATE=`date '+%Y-%m-%d'`
HOST=`hostname -s`
export DATE HOST

set -e

echo "`date` starting: $*"

verbose=0
do_clone=yes
build=yes

conf=
LOCK=$HOME/.autotest-lock

############################
# Read command line entries#
############################

while [ "$1" ]
do
        case "$1" in
                --no-clone) do_clone="";;
                --no-build) build="";;
                --verbose) verbose=`expr $verbose + 1`;;
                --clone=*) clone=`echo $1 | sed s/--clone=//`;;
                --version) echo $VERSION; exit;;
                --conf=*) conf=`echo $1 | sed s/--conf=//`;;
                *) RUN=$*;;
        esac
        shift
done

#################################
#Make sure the configfile exists#
#if it does not exit. if it does#
# (.) load it                   #
#################################

if [ -z "$conf" ]
then
        conf=`pwd`/autotest.conf
fi

if [ -f $conf ]
then
        . $conf
else
        echo "Can't find config file: $conf"
        exit
fi

###############################
# Validate that all interesting
# variables where set in conf
###############################
vars="src_clone_base install_dir build_dir"
for i in $vars
do
  t=`echo echo \\$$i`
  if [ -z "`eval $t`" ]
  then
      echo "Invalid config: $conf, variable $i is not set"
      exit
  fi
done

###############################
#Print out the enviroment vars#
###############################

if [ $verbose -gt 0 ]
then
        env
fi

####################################
# Setup the lock file name and path#
# Setup the clone source location  #
####################################

src_clone=$src_clone_base-$clone

#######################################
# Check to see if the lock file exists#
# If it does exit.                    #
#######################################

if [ -f $LOCK ]
then
        echo "Lock file exists: $LOCK"
        exit 1
fi

#######################################
# If the lock file does not exist then#
# create it with date and run info    #
#######################################

echo "$DATE $RUN" > $LOCK

#############################
#If any errors here down, we#
# trap them, and remove the #
# Lock file before exit     #
#############################
if [ `uname -s` != "SunOS" ]
then
        trap "rm -f $LOCK" ERR
fi

# You can add more to this path#
################################

dst_place=${build_dir}/clone-mysql-$clone-$DATE.$$

#########################################
# Delete source and pull down the latest#
#########################################

if [ "$do_clone" ]
then
        rm -rf $dst_place
        bk clone $src_clone $dst_place
fi

##########################################
# Build the source, make installs, and   #
# create the database to be rsynced      #
##########################################

if [ "$build" ]
then
        cd $dst_place
        rm -rf $install_dir
        BUILD/compile-ndb-autotest --prefix=$install_dir
        make install
fi

################################
# Start run script             #
################################

script=$install_dir/mysql-test/ndb/autotest-run.sh
$script $save_args --conf=$conf --install-dir=$install_dir --suite=$RUN --nolock

if [ "$build" ]
then
    rm -rf $dst_place
fi
rm -f $LOCK
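
A rough sketch of how autotest-boot.sh is meant to be driven, assuming an autotest.conf that defines src_clone_base, install_dir and build_dir (the paths and clone name below are placeholders, not part of this commit):

  # hypothetical invocation: bk-clone <src_clone_base>-5.1-new-ndb, build it with
  # BUILD/compile-ndb-autotest, then hand over to autotest-run.sh with --suite=daily-basic
  sh autotest-boot.sh --conf=$HOME/autotest.conf --clone=5.1-new-ndb daily-basic

  # re-run against an already built tree, skipping the bk clone and the build step
  sh autotest-boot.sh --no-clone --no-build --conf=$HOME/autotest.conf --clone=5.1-new-ndb daily-basic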
storage/ndb/test/run-test/autotest-run.sh (new file)

#!/bin/sh
#############################################################
# This script created by Jonas does the following           #
# Cleans up clones and pevious builds, pulls new clones,    #
# builds, deploys, configures the tests and launches ATRT   #
#############################################################

###############
#Script setup #
##############

save_args=$*
VERSION="autotest-run.sh version 1.00"

DATE=`date '+%Y-%m-%d'`
HOST=`hostname -s`
export DATE HOST

set -e
ulimit -Sc unlimited

echo "`date` starting: $*"

RSYNC_RSH=ssh
export RSYNC_RSH

verbose=0
report=yes
nolock=
RUN="daily-basic"
conf=autotest.conf
LOCK=$HOME/.autotest-lock

############################
# Read command line entries#
############################

while [ "$1" ]
do
        case "$1" in
                --verbose) verbose=`expr $verbose + 1`;;
                --conf=*) conf=`echo $1 | sed s/--conf=//`;;
                --version) echo $VERSION; exit;;
                --suite=*) RUN=`echo $1 | sed s/--suite=//`;;
                --install-dir=*) install_dir=`echo $1 | sed s/--install-dir=//`;;
                --clone=*) clone=`echo $1 | sed s/--clone=//`;;
                --nolock) nolock=true;;
        esac
        shift
done

#################################
#Make sure the configfile exists#
#if it does not exit. if it does#
# (.) load it                   #
#################################

install_dir_save=$install_dir
if [ -f $conf ]
then
        . $conf
else
        echo "Can't find config file: $conf"
        exit
fi
install_dir=$install_dir_save

###############################
# Validate that all interesting
# variables where set in conf
###############################
vars="target base_dir install_dir hosts"
if [ "$report" ]
then
        vars="$vars result_host result_path"
fi
for i in $vars
do
  t=`echo echo \\$$i`
  if [ -z "`eval $t`" ]
  then
      echo "Invalid config: $conf, variable $i is not set"
      exit
  fi
done

###############################
#Print out the enviroment vars#
###############################

if [ $verbose -gt 0 ]
then
        env
fi

#######################################
# Check to see if the lock file exists#
# If it does exit.                    #
#######################################

if [ -z "$nolock" ]
then
    if [ -f $LOCK ]
    then
        echo "Lock file exists: $LOCK"
        exit 1
    fi
    echo "$DATE $RUN" > $LOCK
fi

#############################
#If any errors here down, we#
# trap them, and remove the #
# Lock file before exit     #
#############################
if [ `uname -s` != "SunOS" ]
then
        trap "rm -f $LOCK" ERR
fi

###############################################
# Check that all interesting files are present#
###############################################

test_dir=$install_dir/mysql-test/ndb
atrt=$test_dir/atrt
test_file=$test_dir/$RUN-tests.txt

if [ ! -f "$test_file" ]
then
    echo "Cant find testfile: $test_file"
    exit 1
fi

if [ ! -x "$atrt" ]
then
    echo "Cant find atrt binary at $atrt"
    exit 1
fi

############################
# check ndb_cpcc fail hosts#
############################
failed=`ndb_cpcc $hosts | awk '{ if($1=="Failed"){ print;}}'`
if [ "$failed" ]
then
  echo "Cant contact cpcd on $failed, exiting"
  exit 1
fi

#############################
# Function for replacing the#
# choose host with real host#
# names. Note $$ = PID      #
#############################
choose(){
        SRC=$1
        TMP1=/tmp/choose.$$
        TMP2=/tmp/choose.$$.$$
        shift

        cp $SRC $TMP1
        i=1
        while [ $# -gt 0 ]
        do
                sed -e s,"CHOOSE_host$i",$1,g < $TMP1 > $TMP2
                mv $TMP2 $TMP1
                shift
                i=`expr $i + 1`
        done
        cat $TMP1
        rm -f $TMP1
}

choose_conf(){
    if [ -f $test_dir/conf-$1-$HOST.cnf ]
    then
        echo "$test_dir/conf-$1-$HOST.cnf"
    elif [ -f $test_dir/conf-$1.cnf ]
    then
        echo "$test_dir/conf-$1.cnf"
    elif [ -f $test_dir/conf-$HOST.cnf ]
    then
        echo "$test_dir/conf-$HOST.cnf"
    else
        echo "Unable to find conf file looked for" 1>&2
        echo "$test_dir/conf-$1-$HOST.cnf and" 1>&2
        echo "$test_dir/conf-$HOST.cnf" 1>&2
        echo "$test_dir/conf-$1.cnf" 1>&2
        exit
    fi
}

#########################################
# Count how many computers we have ready#
#########################################

count_hosts(){
    cnt=`grep "CHOOSE_host" $1 | awk '{for(i=1; i<=NF;i++) \
    if(index($i, "CHOOSE_host") > 0) print $i;}' | sort | uniq | wc -l`
    echo $cnt
}

conf=`choose_conf $RUN`
count=`count_hosts $conf`
avail=`echo $hosts | wc -w`
if [ $count -gt $avail ]
then
    echo "Not enough hosts"
    echo "Needs: $count available: $avail ($avail_hosts)"
    exit 1
fi

###
# Make directories needed

p=`pwd`
run_dir=$install_dir/run-$RUN-mysql-$clone-$target
res_dir=$base_dir/result-$RUN-mysql-$clone-$target/$DATE
tar_dir=$base_dir/saved-results

mkdir -p $run_dir $res_dir $tar_dir
rm -rf $res_dir/* $run_dir/*

###
#
# Do sed substitiutions
#
cd $run_dir
choose $conf $hosts > d.tmp.$$
sed -e s,CHOOSE_dir,"$run_dir/run",g < d.tmp.$$ > my.cnf

# Setup configuration
$atrt Cdq my.cnf

# Start...
$atrt --report-file=report.txt --log-file=log.txt --testcase-file=$test_dir/$RUN-tests.txt my.cnf

# Make tar-ball
[ -f log.txt ] && mv log.txt $res_dir
[ -f report.txt ] && mv report.txt $res_dir
[ "`find . -name 'result*'`" ] && mv result* $res_dir

cd $res_dir

echo "date=$DATE" > info.txt
echo "suite=$RUN" >> info.txt
echo "clone=$clone" >> info.txt
echo "arch=$target" >> info.txt

cd ..
p2=`pwd`
cd ..
tarfile=res.$RUN.$clone.$target.$DATE.$HOST.$$.tgz
tar cfz $tar_dir/$tarfile `basename $p2`/$DATE

if [ "$report" ]
then
    scp $tar_dir/$tarfile $result_host:$result_path/
fi

cd $p
rm -rf $res_dir $run_dir

if [ -z "$nolock" ]
then
    rm -f $LOCK
fi
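
A sketch of invoking autotest-run.sh directly against an installed tree; the conf file must define target, base_dir, install_dir and hosts (plus result_host and result_path when reporting is on), and the values below are illustrative assumptions only:

  # hypothetical invocation
  sh autotest-run.sh --conf=$HOME/autotest.conf --install-dir=$HOME/ndb-install \
      --suite=daily-basic --clone=5.1-new-ndb --nolock

  # choose_conf() resolves the cluster template in this order:
  #   conf-daily-basic-<host>.cnf, conf-daily-basic.cnf, conf-<host>.cnf
  # choose() then substitutes CHOOSE_host1, CHOOSE_host2, ... with the names in $hosts,
  # and CHOOSE_dir is replaced with $run_dir/run before atrt is started.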
storage/ndb/test/run-test/conf-daily-basic-ndb08.txt (deleted)
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
storage/ndb/test/run-test/conf-daily-devel-ndb08.txt (deleted)
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
storage/ndb/test/run-test/conf-daily-devel-ndbmaster.txt (deleted)
baseport: 16000
basedir: CHOOSE_dir
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: CHOOSE_dir/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
storage/ndb/test/run-test/conf-daily-sql-ndb08.txt (deleted)
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
storage/ndb/test/run-test/conf-daily-sql-ndbmaster.txt (deleted)
baseport: 16000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
mysqld: CHOOSE_host1 CHOOSE_host4
mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 16000
ArbitrationRank: 1
DataDir: .
storage/ndb/test/run-test/conf-dl145a.cnf (new file)
[atrt]
basedir = CHOOSE_dir
baseport = 14000
clusters = .2node
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config.2node]
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3
ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
NoOfReplicas = 2
IndexMemory = 100M
DataMemory = 300M
BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
storage/ndb/test/run-test/conf-dl145a.txt (deleted)
baseport: 14000
basedir: /home/ndbdev/autotest/run
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /home/ndbdev/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
storage/ndb/test/run-test/conf-ndbmaster.cnf (new file)
[atrt]
basedir = CHOOSE_dir
baseport = 14000
clusters = .4node
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config.4node]
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3,CHOOSE_host2,CHOOSE_host3
ndbapi= CHOOSE_host1,CHOOSE_host1,CHOOSE_host1
NoOfReplicas = 2
IndexMemory = 100M
DataMemory = 300M
BackupMemory = 64M
MaxNoOfConcurrentScans = 100
MaxNoOfSavedMessages= 1000
SendBufferMemory = 2M
storage/ndb/test/run-test/conf-ndbmaster.txt (deleted)
baseport: 14000
basedir: CHOOSE_dir
mgm: CHOOSE_host1
ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: CHOOSE_dir/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
storage/ndb/test/run-test/conf-repl.cnf (new file)
[atrt]
basedir=CHOOSE_dir
baseport=15000
clusters= .master,.slave
replicate= 1.master:1.slave
[ndb_mgmd]
[mysqld]
skip-innodb
skip-bdb
[cluster_config]
MaxNoOfSavedMessages= 1000
DataMemory = 100M
[cluster_config.master]
NoOfReplicas = 2
ndb_mgmd = CHOOSE_host1
ndbd = CHOOSE_host2,CHOOSE_host3
mysqld = CHOOSE_host1
ndbapi= CHOOSE_host1
[cluster_config.slave]
NoOfReplicas = 1
ndb_mgmd = CHOOSE_host4
ndbd = CHOOSE_host4
mysqld = CHOOSE_host4
storage/ndb/test/run-test/conf-shark.txt (deleted)
baseport: 14000
basedir: /space/autotest
mgm: CHOOSE_host1
ndb: CHOOSE_host1 CHOOSE_host1
api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
-- cluster config
[DB DEFAULT]
NoOfReplicas: 2
IndexMemory: 100M
DataMemory: 300M
BackupMemory: 64M
MaxNoOfConcurrentScans: 100
DataDir: .
FileSystemPath: /space/autotest/run
[MGM DEFAULT]
PortNumber: 14000
ArbitrationRank: 1
DataDir: .
[TCP DEFAULT]
SendBufferMemory: 2M
storage/ndb/test/run-test/example-my.cnf (new file)
[atrt]
basedir=/home/jonas/atrt
baseport=10000
clusters = .master
clusters= .master,.slave
replicate = 1.master:1.slave
replicate = 2.master:2.slave
[cluster_config]
NoOfReplicas= 2
IndexMemory= 10M
DataMemory= 50M
MaxNoOfConcurrentScans= 100
Diskless = 1
[cluster_config.master]
ndb_mgmd = local1
ndbd = local1,local1
mysqld = local1,local1
ndbapi= local1
NoOfReplicas= 2
[cluster_config.slave]
ndb_mgmd = local1
ndbd = local1
ndbapi= local1
mysqld = local1,local1
NoOfReplicas= 1
[mysqld]
skip-innodb
skip-bdb
#
# Generated by atrt
# Mon May 29 23:27:49 2006
[mysql_cluster.master]
ndb-connectstring= local1:10000
[cluster_config.ndb_mgmd.1.master]
PortNumber= 10000
[cluster_config.ndbd.1.master]
FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.1
[cluster_config.ndbd.2.master]
FileSystemPath= /home/jonas/atrt/cluster.master/ndbd.2
[mysqld.1.master]
datadir= /home/jonas/atrt/cluster.master/mysqld.1
socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
port= 10001
server-id= 1
log-bin
ndb-connectstring= local1:10000
ndbcluster
[client.1.master]
socket= /home/jonas/atrt/cluster.master/mysqld.1/mysql.sock
port= 10001
[mysqld.2.master]
datadir= /home/jonas/atrt/cluster.master/mysqld.2
socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
port= 10002
server-id= 2
log-bin
ndb-connectstring= local1:10000
ndbcluster
[client.2.master]
socket= /home/jonas/atrt/cluster.master/mysqld.2/mysql.sock
port= 10002
[mysql_cluster.slave]
ndb-connectstring= local1:10003
[cluster_config.ndb_mgmd.1.slave]
PortNumber= 10003
[cluster_config.ndbd.1.slave]
FileSystemPath= /home/jonas/atrt/cluster.slave/ndbd.1
[mysqld.1.slave]
datadir= /home/jonas/atrt/cluster.slave/mysqld.1
socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
port= 10004
server-id= 3
master-host= local1
master-port= 10001
master-user= root
master-password= ""
ndb-connectstring= local1:10003
ndbcluster
[client.1.slave]
socket= /home/jonas/atrt/cluster.slave/mysqld.1/mysql.sock
port= 10004
[mysqld.2.slave]
datadir= /home/jonas/atrt/cluster.slave/mysqld.2
socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
port= 10005
server-id= 4
master-host= local1
master-port= 10002
master-user= root
master-password= ""
ndb-connectstring= local1:10003
ndbcluster
[client.2.slave]
socket= /home/jonas/atrt/cluster.slave/mysqld.2/mysql.sock
port= 10005
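
The "Generated by atrt" half of this example corresponds to what the new atrt bootstrap appends to my.cnf. A hedged sketch of the sequence autotest-run.sh uses to get from the hand-written part to a runnable setup follows (the working directory is an assumption; per the switch parsing added in main.cpp, 'C' means configure with clean, 'd' deploy and 'q' quit before running tests):

  # hypothetical bootstrap, mirroring the calls in autotest-run.sh
  cd /home/jonas/atrt
  atrt Cdq my.cnf                     # configure + deploy, then quit
  atrt --report-file=report.txt --log-file=log.txt \
       --testcase-file=daily-basic-tests.txt my.cnf   # run a test suite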
storage/ndb/test/run-test/files.cpp (new file)

#include "atrt.hpp"
#include <sys/types.h>
#include <dirent.h>

static bool create_directory(const char * path);

bool
setup_directories(atrt_config& config, int setup)
{
  /**
   * 0 = validate
   * 1 = setup
   * 2 = setup+clean
   */
  for (size_t i = 0; i < config.m_clusters.size(); i++)
  {
    atrt_cluster& cluster = *config.m_clusters[i];
    for (size_t j = 0; j < cluster.m_processes.size(); j++)
    {
      atrt_process& proc = *cluster.m_processes[j];
      const char * dir = proc.m_proc.m_cwd.c_str();
      struct stat sbuf;
      int exists = 0;
      if (lstat(dir, &sbuf) == 0)
      {
        if (S_ISDIR(sbuf.st_mode))
          exists = 1;
        else
          exists = -1;
      }

      switch(setup){
      case 0:
        switch(exists){
        case 0:
          g_logger.error("Could not find directory: %s", dir);
          return false;
        case -1:
          g_logger.error("%s is not a directory!", dir);
          return false;
        }
        break;
      case 1:
        if (exists == -1)
        {
          g_logger.error("%s is not a directory!", dir);
          return false;
        }
        break;
      case 2:
        if (exists == 1)
        {
          if (!remove_dir(dir))
          {
            g_logger.error("Failed to remove %s!", dir);
            return false;
          }
          exists = 0;
          break;
        }
        else if (exists == -1)
        {
          if (!unlink(dir))
          {
            g_logger.error("Failed to remove %s!", dir);
            return false;
          }
          exists = 0;
        }
      }
      if (exists != 1)
      {
        if (!create_directory(dir))
        {
          return false;
        }
      }
    }
  }
  return true;
}

static
void
printfile(FILE* out, Properties& props, const char * section, ...)
{
  Properties::Iterator it(&props);
  const char * name = it.first();
  if (name)
  {
    va_list ap;
    va_start(ap, section);
    /* const int ret = */ vfprintf(out, section, ap);
    va_end(ap);
    fprintf(out, "\n");
    for (; name; name = it.next())
    {
      const char* val;
      props.get(name, &val);
      fprintf(out, "%s %s\n", name + 2, val);
    }
    fprintf(out, "\n");
  }
  fflush(out);
}

bool
setup_files(atrt_config& config, int setup, int sshx)
{
  /**
   * 0 = validate
   * 1 = setup
   * 2 = setup+clean
   */
  BaseString mycnf;
  mycnf.assfmt("%s/my.cnf", g_basedir);

  if (mycnf != g_my_cnf)
  {
    struct stat sbuf;
    int ret = lstat(mycnf.c_str(), &sbuf);

    if (ret == 0)
    {
      if (unlink(mycnf.c_str()) != 0)
      {
        g_logger.error("Failed to remove %s", mycnf.c_str());
        return false;
      }
    }

    BaseString cp = "cp ";
    cp.appfmt("%s %s", g_my_cnf, mycnf.c_str());
    if (system(cp.c_str()) != 0)
    {
      g_logger.error("Failed to '%s'", cp.c_str());
      return false;
    }
  }

  if (setup == 2 || config.m_generated)
  {
    /**
     * Do mysql_install_db
     */
    for (size_t i = 0; i < config.m_clusters.size(); i++)
    {
      atrt_cluster& cluster = *config.m_clusters[i];
      for (size_t j = 0; j < cluster.m_processes.size(); j++)
      {
        atrt_process& proc = *cluster.m_processes[j];
        if (proc.m_type == atrt_process::AP_MYSQLD)
        {
          const char * val;
          require(proc.m_options.m_loaded.get("--datadir=", &val));
          BaseString tmp;
          tmp.assfmt("%s/bin/mysql_install_db --datadir=%s > /dev/null 2>&1",
                     g_prefix, val);
          if (system(tmp.c_str()) != 0)
          {
            g_logger.error("Failed to mysql_install_db for %s",
                           proc.m_proc.m_cwd.c_str());
          }
          else
          {
            g_logger.info("mysql_install_db for %s",
                          proc.m_proc.m_cwd.c_str());
          }
        }
      }
    }
  }

  FILE * out = NULL;
  if (config.m_generated == false)
  {
    g_logger.info("Nothing configured...");
  }
  else
  {
    out = fopen(mycnf.c_str(), "a+");
    if (out == 0)
    {
      g_logger.error("Failed to open %s for append", mycnf.c_str());
      return false;
    }
    time_t now = time(0);
    fprintf(out, "#\n# Generated by atrt\n");
    fprintf(out, "# %s\n", ctime(&now));
  }

  for (size_t i = 0; i < config.m_clusters.size(); i++)
  {
    atrt_cluster& cluster = *config.m_clusters[i];
    if (out)
    {
      Properties::Iterator it(&cluster.m_options.m_generated);
      printfile(out, cluster.m_options.m_generated,
                "[mysql_cluster%s]", cluster.m_name.c_str());
    }

    for (size_t j = 0; j < cluster.m_processes.size(); j++)
    {
      atrt_process& proc = *cluster.m_processes[j];

      if (out)
      {
        switch(proc.m_type){
        case atrt_process::AP_NDB_MGMD:
          printfile(out, proc.m_options.m_generated,
                    "[cluster_config.ndb_mgmd.%d%s]",
                    proc.m_index, proc.m_cluster->m_name.c_str());
          break;
        case atrt_process::AP_NDBD:
          printfile(out, proc.m_options.m_generated,
                    "[cluster_config.ndbd.%d%s]",
                    proc.m_index, proc.m_cluster->m_name.c_str());
          break;
        case atrt_process::AP_MYSQLD:
          printfile(out, proc.m_options.m_generated,
                    "[mysqld.%d%s]",
                    proc.m_index, proc.m_cluster->m_name.c_str());
          break;
        case atrt_process::AP_NDB_API:
          break;
        case atrt_process::AP_CLIENT:
          printfile(out, proc.m_options.m_generated,
                    "[client.%d%s]",
                    proc.m_index, proc.m_cluster->m_name.c_str());
          break;
        case atrt_process::AP_ALL:
        case atrt_process::AP_CLUSTER:
          abort();
        }
      }

      /**
       * Create env.sh
       */
      BaseString tmp;
      tmp.assfmt("%s/env.sh", proc.m_proc.m_cwd.c_str());
      char ** env = BaseString::argify(0, proc.m_proc.m_env.c_str());
      if (env[0])
      {
        Vector<BaseString> keys;
        FILE* fenv = fopen(tmp.c_str(), "w+");
        if (fenv == 0)
        {
          g_logger.error("Failed to open %s for writing", tmp.c_str());
          return false;
        }
        for (size_t k = 0; env[k]; k++)
        {
          tmp = env[k];
          int pos = tmp.indexOf('=');
          require(pos > 0);
          env[k][pos] = 0;
          fprintf(fenv, "%s=\"%s\"\n", env[k], env[k]+pos+1);
          keys.push_back(env[k]);
          free(env[k]);
        }
        fprintf(fenv, "PATH=%s/bin:%s/libexec:$PATH\n", g_prefix, g_prefix);
        keys.push_back("PATH");
        for (size_t k = 0; k < keys.size(); k++)
          fprintf(fenv, "export %s\n", keys[k].c_str());
        fflush(fenv);
        fclose(fenv);
      }
      free(env);

      tmp.assfmt("%s/ssh-login.sh", proc.m_proc.m_cwd.c_str());
      FILE* fenv = fopen(tmp.c_str(), "w+");
      if (fenv == 0)
      {
        g_logger.error("Failed to open %s for writing", tmp.c_str());
        return false;
      }
      fprintf(fenv, "#!/bin/sh\n");
      fprintf(fenv, "cd %s\n", proc.m_proc.m_cwd.c_str());
      fprintf(fenv, "[ -f /etc/profile ] && . /etc/profile\n");
      fprintf(fenv, ". env.sh\n");
      fprintf(fenv, "ulimit -Sc unlimited\n");
      fprintf(fenv, "bash -i");
      fflush(fenv);
      fclose(fenv);
    }
  }

  if (out)
  {
    fflush(out);
    fclose(out);
  }

  return true;
}

static
bool
create_directory(const char * path)
{
  BaseString tmp(path);
  Vector<BaseString> list;
  if (tmp.split(list, "/") == 0)
  {
    g_logger.error("Failed to create directory: %s", tmp.c_str());
    return false;
  }

  BaseString cwd = "/";
  for (size_t i = 0; i < list.size(); i++)
  {
    cwd.append(list[i].c_str());
    cwd.append("/");
    mkdir(cwd.c_str(), S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
  }

  struct stat sbuf;
  if (lstat(path, &sbuf) != 0 ||
      !S_ISDIR(sbuf.st_mode))
  {
    g_logger.error("Failed to create directory: %s (%s)",
                   tmp.c_str(),
                   cwd.c_str());
    return false;
  }

  return true;
}

bool
remove_dir(const char * path, bool inclusive)
{
  DIR* dirp = opendir(path);

  if (dirp == 0)
  {
    if (errno != ENOENT)
    {
      g_logger.error("Failed to remove >%s< errno: %d %s",
                     path, errno, strerror(errno));
      return false;
    }
    return true;
  }

  struct dirent * dp;
  BaseString name = path;
  name.append("/");
  while ((dp = readdir(dirp)) != NULL)
  {
    if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0))
    {
      BaseString tmp = name;
      tmp.append(dp->d_name);

      if (remove(tmp.c_str()) == 0)
      {
        continue;
      }

      if (!remove_dir(tmp.c_str()))
      {
        closedir(dirp);
        return false;
      }
    }
  }

  closedir(dirp);
  if (inclusive)
  {
    if (rmdir(path) != 0)
    {
      g_logger.error("Failed to remove >%s< errno: %d %s",
                     path, errno, strerror(errno));
      return false;
    }
  }
  return true;
}
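
For reference, the env.sh and ssh-login.sh files that setup_files() writes into each process working directory come out roughly as below; the variable values are illustrative assumptions, only the structure follows the fprintf calls above:

  # env.sh - one VAR="value" line per entry in proc.m_proc.m_env, then PATH, then exports
  MYSQL_BASE_DIR="/path/to/install"
  PATH=/path/to/install/bin:/path/to/install/libexec:$PATH
  export MYSQL_BASE_DIR
  export PATH

  # ssh-login.sh - sourced environment plus an interactive shell in the process cwd
  #!/bin/sh
  cd /path/to/proc/cwd
  [ -f /etc/profile ] && . /etc/profile
  . env.sh
  ulimit -Sc unlimited
  bash -i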
storage/ndb/test/run-test/main.cpp
@@ -14,20 +14,19 @@
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
 
 #include <ndb_global.h>
-#include <getarg.h>
 #include <BaseString.hpp>
 #include <Parser.hpp>
+#include "atrt.hpp"
+#include <my_sys.h>
+#include <my_getopt.h>
 #include <NdbOut.hpp>
 #include <Properties.hpp>
 #include <NdbAutoPtr.hpp>
-#include "run-test.hpp"
 #include <SysLogHandler.hpp>
 #include <FileLogHandler.hpp>
 #include <mgmapi.h>
 #include "CpcClient.hpp"
+#include <NdbSleep.h>
 
+#define PATH_SEPARATOR "/"
+
 /** Global variables */
 static const char progname[] = "ndb_atrt";
@@ -36,76 +35,198 @@ static const char * g_analyze_progname = "atrt-analyze-result.sh";
static
const
char
*
g_clear_progname
=
"atrt-clear-result.sh"
;
static
const
char
*
g_setup_progname
=
"atrt-setup.sh"
;
static
const
char
*
g_setup_path
=
0
;
static
const
char
*
g_process_config_filename
=
"d.txt"
;
static
const
char
*
g_log_filename
=
0
;
static
const
char
*
g_test_case_filename
=
0
;
static
const
char
*
g_report_filename
=
0
;
static
const
char
*
g_default_user
=
0
;
static
const
char
*
g_default_base_dir
=
0
;
static
int
g_default_base_port
=
0
;
static
int
g_mysqld_use_base
=
1
;
static
int
g_do_setup
=
0
;
static
int
g_do_deploy
=
0
;
static
int
g_do_sshx
=
0
;
static
int
g_do_start
=
0
;
static
int
g_do_quit
=
0
;
static
int
g_
report
=
0
;
static
int
g_verbosity
=
0
;
static
int
g_
help
=
0
;
static
int
g_verbosity
=
1
;
static
FILE
*
g_report_file
=
0
;
static
FILE
*
g_test_case_file
=
stdin
;
static
int
g_mode
=
0
;
Logger
g_logger
;
atrt_config
g_config
;
static
int
g_mode_bench
=
0
;
static
int
g_mode_regression
=
0
;
static
int
g_mode_interactive
=
0
;
static
int
g_mode
=
0
;
static
struct
getargs
args
[]
=
{
{
"process-config"
,
0
,
arg_string
,
&
g_process_config_filename
,
0
,
0
},
{
"setup-path"
,
0
,
arg_string
,
&
g_setup_path
,
0
,
0
},
{
0
,
'v'
,
arg_counter
,
&
g_verbosity
,
0
,
0
},
{
"log-file"
,
0
,
arg_string
,
&
g_log_filename
,
0
,
0
},
{
"testcase-file"
,
'f'
,
arg_string
,
&
g_test_case_filename
,
0
,
0
},
{
0
,
'R'
,
arg_flag
,
&
g_report
,
0
,
0
},
{
"report-file"
,
0
,
arg_string
,
&
g_report_filename
,
0
,
0
},
{
"interactive"
,
'i'
,
arg_flag
,
&
g_mode_interactive
,
0
,
0
},
{
"regression"
,
'r'
,
arg_flag
,
&
g_mode_regression
,
0
,
0
},
{
"bench"
,
'b'
,
arg_flag
,
&
g_mode_bench
,
0
,
0
},
const
char
*
g_user
=
0
;
int
g_baseport
=
10000
;
int
g_fqpn
=
0
;
int
g_default_ports
=
0
;
const
char
*
g_cwd
=
0
;
const
char
*
g_basedir
=
0
;
const
char
*
g_my_cnf
=
0
;
const
char
*
g_prefix
=
0
;
const
char
*
g_clusters
=
0
;
BaseString
g_replicate
;
const
char
*
save_file
=
0
;
char
*
save_extra_file
=
0
;
const
char
*
save_group_suffix
=
0
;
const
char
*
g_dummy
;
char
*
g_env_path
=
0
;
/** Dummy, extern declared in ndb_opts.h */
int
g_print_full_config
=
0
,
opt_ndb_shm
;
my_bool
opt_core
;
static
struct
my_option
g_options
[]
=
{
{
"help"
,
'?'
,
"Display this help and exit."
,
(
gptr
*
)
&
g_help
,
(
gptr
*
)
&
g_help
,
0
,
GET_BOOL
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"version"
,
'V'
,
"Output version information and exit."
,
0
,
0
,
0
,
GET_NO_ARG
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"clusters"
,
256
,
"Cluster"
,
(
gptr
*
)
&
g_clusters
,
(
gptr
*
)
&
g_clusters
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"replicate"
,
1024
,
"replicate"
,
(
gptr
*
)
&
g_dummy
,
(
gptr
*
)
&
g_dummy
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"log-file"
,
256
,
"log-file"
,
(
gptr
*
)
&
g_log_filename
,
(
gptr
*
)
&
g_log_filename
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"testcase-file"
,
'f'
,
"testcase-file"
,
(
gptr
*
)
&
g_test_case_filename
,
(
gptr
*
)
&
g_test_case_filename
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"report-file"
,
'r'
,
"report-file"
,
(
gptr
*
)
&
g_report_filename
,
(
gptr
*
)
&
g_report_filename
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"basedir"
,
256
,
"Base path"
,
(
gptr
*
)
&
g_basedir
,
(
gptr
*
)
&
g_basedir
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"baseport"
,
256
,
"Base port"
,
(
gptr
*
)
&
g_baseport
,
(
gptr
*
)
&
g_baseport
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_baseport
,
0
,
0
,
0
,
0
,
0
},
{
"prefix"
,
256
,
"mysql install dir"
,
(
gptr
*
)
&
g_prefix
,
(
gptr
*
)
&
g_prefix
,
0
,
GET_STR
,
REQUIRED_ARG
,
0
,
0
,
0
,
0
,
0
,
0
},
{
"verbose"
,
'v'
,
"Verbosity"
,
(
gptr
*
)
&
g_verbosity
,
(
gptr
*
)
&
g_verbosity
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_verbosity
,
0
,
0
,
0
,
0
,
0
},
{
"configure"
,
256
,
"configure"
,
(
gptr
*
)
&
g_do_setup
,
(
gptr
*
)
&
g_do_setup
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_do_setup
,
0
,
0
,
0
,
0
,
0
},
{
"deploy"
,
256
,
"deploy"
,
(
gptr
*
)
&
g_do_deploy
,
(
gptr
*
)
&
g_do_deploy
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_do_deploy
,
0
,
0
,
0
,
0
,
0
},
{
"sshx"
,
256
,
"sshx"
,
(
gptr
*
)
&
g_do_sshx
,
(
gptr
*
)
&
g_do_sshx
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_do_sshx
,
0
,
0
,
0
,
0
,
0
},
{
"start"
,
256
,
"start"
,
(
gptr
*
)
&
g_do_start
,
(
gptr
*
)
&
g_do_start
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_do_start
,
0
,
0
,
0
,
0
,
0
},
{
"fqpn"
,
256
,
"Fully qualified path-names "
,
(
gptr
*
)
&
g_fqpn
,
(
gptr
*
)
&
g_fqpn
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_fqpn
,
0
,
0
,
0
,
0
,
0
},
{
"default-ports"
,
256
,
"Use default ports when possible"
,
(
gptr
*
)
&
g_default_ports
,
(
gptr
*
)
&
g_default_ports
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_default_ports
,
0
,
0
,
0
,
0
,
0
},
{
"mode"
,
256
,
"Mode 0=interactive 1=regression 2=bench"
,
(
gptr
*
)
&
g_mode
,
(
gptr
*
)
&
g_mode
,
0
,
GET_INT
,
REQUIRED_ARG
,
g_mode
,
0
,
0
,
0
,
0
,
0
},
{
"quit"
,
256
,
"Quit before starting tests"
,
(
gptr
*
)
&
g_mode
,
(
gptr
*
)
&
g_do_quit
,
0
,
GET_BOOL
,
NO_ARG
,
g_do_quit
,
0
,
0
,
0
,
0
,
0
},
{
0
,
0
,
0
,
0
,
0
,
0
,
GET_NO_ARG
,
NO_ARG
,
0
,
0
,
0
,
0
,
0
,
0
}
};
const
int
arg_count
=
10
;
const
int
p_ndb
=
atrt_process
::
AP_NDB_MGMD
|
atrt_process
::
AP_NDBD
;
const
int
p_servers
=
atrt_process
::
AP_MYSQLD
;
const
int
p_clients
=
atrt_process
::
AP_CLIENT
|
atrt_process
::
AP_NDB_API
;
int
main
(
int
argc
,
const
char
**
argv
){
main
(
int
argc
,
char
**
argv
)
{
ndb_init
();
bool
restart
=
true
;
int
lineno
=
1
;
int
test_no
=
1
;
int
return_code
=
1
;
const
int
p_ndb
=
atrt_process
::
NDB_MGM
|
atrt_process
::
NDB_DB
;
const
int
p_servers
=
atrt_process
::
MYSQL_SERVER
|
atrt_process
::
NDB_REP
;
const
int
p_clients
=
atrt_process
::
MYSQL_CLIENT
|
atrt_process
::
NDB_API
;
g_logger
.
setCategory
(
progname
);
g_logger
.
enable
(
Logger
::
LL_ALL
);
g_logger
.
createConsoleHandler
();
if
(
!
parse_args
(
argc
,
argv
))
goto
end
;
g_logger
.
info
(
"Starting..."
);
if
(
!
setup_config
(
g_config
))
g_config
.
m_generated
=
false
;
g_config
.
m_replication
=
g_replicate
;
if
(
!
setup_config
(
g_config
))
goto
end
;
if
(
!
configure
(
g_config
,
g_do_setup
))
goto
end
;
g_logger
.
info
(
"Setting up directories"
);
if
(
!
setup_directories
(
g_config
,
g_do_setup
))
goto
end
;
if
(
g_do_setup
)
{
g_logger
.
info
(
"Setting up files"
);
if
(
!
setup_files
(
g_config
,
g_do_setup
,
g_do_sshx
))
goto
end
;
}
if
(
g_do_deploy
)
{
if
(
!
deploy
(
g_config
))
goto
end
;
}
if
(
g_do_quit
)
{
return_code
=
0
;
goto
end
;
}
if
(
!
setup_hosts
(
g_config
))
goto
end
;
if
(
g_do_sshx
)
{
g_logger
.
info
(
"Starting xterm-ssh"
);
if
(
!
sshx
(
g_config
,
g_do_sshx
))
goto
end
;
g_logger
.
info
(
"Done...sleeping"
);
while
(
true
)
{
NdbSleep_SecSleep
(
1
);
}
return_code
=
0
;
goto
end
;
}
g_logger
.
info
(
"Connecting to hosts"
);
if
(
!
connect_hosts
(
g_config
))
goto
end
;
if
(
!
setup_hosts
(
g_config
))
if
(
g_do_start
&&
!
g_test_case_filename
)
{
g_logger
.
info
(
"Starting server processes: %x"
,
g_do_start
);
if
(
!
start
(
g_config
,
g_do_start
))
goto
end
;
g_logger
.
info
(
"Done...sleeping"
);
while
(
true
)
{
NdbSleep_SecSleep
(
1
);
}
return_code
=
0
;
goto
end
;
}
return_code
=
0
;
/**
* Main loop
*/
...
...
@@ -114,37 +235,22 @@ main(int argc, const char ** argv){
      * Do we need to restart ndb
      */
     if (restart){
-      g_logger.info("(Re)starting ndb processes");
+      g_logger.info("(Re)starting server processes");
       if (!stop_processes(g_config, ~0))
         goto end;
 
-      if (!start_processes(g_config, atrt_process::NDB_MGM))
-        goto end;
-
-      if (!connect_ndb_mgm(g_config)){
-        goto end;
-      }
-
-      if (!start_processes(g_config, atrt_process::NDB_DB))
-        goto end;
-
-      if (!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
-        goto end;
-
-      for (Uint32 i = 0; i < 3; i++)
-        if (wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
-          goto started;
-      goto end;
-
-    started:
-      if (!start_processes(g_config, p_servers))
-        goto end;
-
-      g_logger.info("Ndb start completed");
+      if (!setup_directories(g_config, 2))
+        goto end;
+
+      if (!setup_files(g_config, 2, 1))
+        goto end;
+
+      if (!start(g_config, p_ndb | p_servers))
+        goto end;
+
+      g_logger.info("All servers start completed");
     }
 
-    const int start_line = lineno;
+    //const int start_line = lineno;
     atrt_testcase test_case;
     if (!read_test_case(g_test_case_file, test_case, lineno))
       goto end;
@@ -165,7 +271,7 @@ main(int argc, const char ** argv){
     const time_t start = time(0);
     time_t now = start;
     do {
-      if (!update_status(g_config, atrt_process::ALL))
+      if (!update_status(g_config, atrt_process::AP_ALL))
         goto end;
 
       int count = 0;
@@ -189,7 +295,7 @@ main(int argc, const char ** argv){
         result = ERR_MAX_TIME_ELAPSED;
         break;
       }
-      sleep(1);
+      NdbSleep_SecSleep(1);
     } while (true);
 
     const time_t elapsed = time(0) - start;
@@ -197,7 +303,8 @@ main(int argc, const char ** argv){
     if (!stop_processes(g_config, p_clients))
       goto end;
 
-    if (!gather_result(g_config, &result))
+    int tmp, *rp = result ? &tmp : &result;
+    if (!gather_result(g_config, rp))
       goto end;
 
     g_logger.info("#%d %s(%d)",
@@ -205,29 +312,35 @@ main(int argc, const char ** argv){
                   (result == 0 ? "OK" : "FAILED"), result);
 
     if (g_report_file != 0){
-      fprintf(g_report_file, "%s %s ; %d ; %d ; %ld\n",
-              test_case.m_command.c_str(), test_case.m_args.c_str(),
-              test_no, result, elapsed);
+      fprintf(g_report_file, "%s ; %d ; %d ; %ld\n",
+              test_case.m_name.c_str(), test_no, result, elapsed);
       fflush(g_report_file);
     }
 
-    if (test_case.m_report || g_mode_bench || (g_mode_regression && result)){
-      BaseString tmp;
-      tmp.assfmt("result.%d", test_no);
-      if (rename("result", tmp.c_str()) != 0){
-        g_logger.critical("Failed to rename %s as %s",
-                          "result", tmp.c_str());
-        goto end;
-      }
-    }
-
-    if (g_mode_interactive && result){
+    if (g_mode == 0 && result){
       g_logger.info("Encountered failed test in interactive mode - terminating");
       break;
     }
 
+    BaseString resdir;
+    resdir.assfmt("result.%d", test_no);
+    remove_dir(resdir.c_str(), true);
+
+    if (test_case.m_report || g_mode == 2 || (g_mode && result))
+    {
+      if (rename("result", resdir.c_str()) != 0)
+      {
+        g_logger.critical("Failed to rename %s as %s",
+                          "result", resdir.c_str());
+        goto end;
+      }
+    }
+    else
+    {
+      remove_dir("result", true);
+    }
+
     if (result != 0){
       restart = true;
     }
     else {
@@ -247,276 +360,254 @@ main(int argc, const char ** argv){
g_test_case_file
=
0
;
}
stop_processes
(
g_config
,
atrt_process
::
ALL
);
stop_processes
(
g_config
,
atrt_process
::
AP_ALL
);
return
return_code
;
}
static
my_bool
get_one_option
(
int
arg
,
const
struct
my_option
*
opt
,
char
*
value
)
{
if
(
arg
==
1024
)
{
if
(
g_replicate
.
length
())
g_replicate
.
append
(
";"
);
g_replicate
.
append
(
value
);
return
1
;
}
return
0
;
}
bool
parse_args
(
int
argc
,
const
char
**
argv
){
int
optind
=
0
;
if
(
getarg
(
args
,
arg_count
,
argc
,
argv
,
&
optind
))
{
arg_printusage
(
args
,
arg_count
,
progname
,
""
);
parse_args
(
int
argc
,
char
**
argv
)
{
char
buf
[
2048
];
if
(
getcwd
(
buf
,
sizeof
(
buf
))
==
0
)
{
g_logger
.
error
(
"Unable to get current working directory"
);
return
false
;
}
if
(
g_log_filename
!=
0
){
g_logger
.
removeConsoleHandler
();
g_logger
.
addHandler
(
new
FileLogHandler
(
g_log_filename
));
g_cwd
=
strdup
(
buf
);
struct
stat
sbuf
;
BaseString
mycnf
;
if
(
argc
>
1
&&
lstat
(
argv
[
argc
-
1
],
&
sbuf
)
==
0
)
{
mycnf
.
append
(
g_cwd
);
mycnf
.
append
(
PATH_SEPARATOR
);
mycnf
.
append
(
argv
[
argc
-
1
]);
}
else
{
int
tmp
=
Logger
::
LL_WARNING
-
g_verbosity
;
tmp
=
(
tmp
<
Logger
::
LL_DEBUG
?
Logger
::
LL_DEBUG
:
tmp
);
g_logger
.
disable
(
Logger
::
LL_ALL
);
g_logger
.
enable
(
Logger
::
LL_ON
);
g_logger
.
enable
((
Logger
::
LoggerLevel
)
tmp
,
Logger
::
LL_ALERT
);
mycnf
.
append
(
g_cwd
);
mycnf
.
append
(
PATH_SEPARATOR
);
mycnf
.
append
(
"my.cnf"
);
if
(
lstat
(
mycnf
.
c_str
(),
&
sbuf
)
!=
0
)
{
g_logger
.
error
(
"Unable to stat %s"
,
mycnf
.
c_str
());
return
false
;
}
}
g_logger
.
info
(
"Bootstrapping using %s"
,
mycnf
.
c_str
());
const
char
*
groups
[]
=
{
"atrt"
,
0
};
int
ret
=
load_defaults
(
mycnf
.
c_str
(),
groups
,
&
argc
,
&
argv
);
save_file
=
defaults_file
;
save_extra_file
=
defaults_extra_file
;
save_group_suffix
=
defaults_group_suffix
;
if
(
!
g_process_config_filename
){
g_logger
.
critical
(
"Process config not specified!"
);
if
(
save_extra_file
)
{
g_logger
.
error
(
"--defaults-extra-file(%s) is not supported..."
,
save_extra_file
);
return
false
;
}
if
(
!
g_setup_path
){
char
buf
[
1024
];
if
(
getcwd
(
buf
,
sizeof
(
buf
))){
g_setup_path
=
strdup
(
buf
);
g_logger
.
info
(
"Setup path not specified, using %s"
,
buf
);
}
else
{
g_logger
.
critical
(
"Setup path not specified!
\n
"
);
return
false
;
}
}
if
(
g_report
&
!
g_report_filename
){
g_report_filename
=
"report.txt"
;
if
(
ret
||
handle_options
(
&
argc
,
&
argv
,
g_options
,
get_one_option
))
{
g_logger
.
error
(
"Failed to load defaults/handle_options"
);
return
false
;
}
if
(
g_report_filename
){
g_report_file
=
fopen
(
g_report_filename
,
"w"
);
if
(
g_report_file
==
0
){
g_logger
.
critical
(
"Unable to create report file: %s"
,
g_report_filename
);
return
false
;
if
(
argc
>=
2
)
{
const
char
*
arg
=
argv
[
argc
-
2
];
while
(
*
arg
)
{
switch
(
*
arg
){
case
'c'
:
g_do_setup
=
(
g_do_setup
==
0
)
?
1
:
g_do_setup
;
break
;
case
'C'
:
g_do_setup
=
2
;
break
;
case
'd'
:
g_do_deploy
=
1
;
break
;
case
'x'
:
g_do_sshx
=
atrt_process
::
AP_CLIENT
|
atrt_process
::
AP_NDB_API
;
break
;
case
'X'
:
g_do_sshx
=
atrt_process
::
AP_ALL
;
break
;
case
's'
:
g_do_start
=
p_ndb
;
break
;
case
'S'
:
g_do_start
=
p_ndb
|
p_servers
;
break
;
case
'f'
:
g_fqpn
=
1
;
break
;
case
'q'
:
g_do_quit
=
1
;
break
;
default:
g_logger
.
error
(
"Unknown switch '%c'"
,
*
arg
);
return
false
;
}
arg
++
;
}
}
if
(
g_test_case_filename
){
g_test_case_file
=
fopen
(
g_test_case_filename
,
"r"
);
if
(
g_test_case_file
==
0
){
g_logger
.
critical
(
"Unable to open file: %s"
,
g_test_case_filename
);
return
false
;
}
if
(
g_log_filename
!=
0
)
{
g_logger
.
removeConsoleHandler
();
g_logger
.
addHandler
(
new
FileLogHandler
(
g_log_filename
));
}
int
sum
=
g_mode_interactive
+
g_mode_regression
+
g_mode_bench
;
if
(
sum
==
0
){
g_mode_interactive
=
1
;
}
if
(
sum
>
1
){
g_logger
.
critical
(
"Only one of bench/regression/interactive can be specified"
);
return
false
;
{
int
tmp
=
Logger
::
LL_WARNING
-
g_verbosity
;
tmp
=
(
tmp
<
Logger
::
LL_DEBUG
?
Logger
::
LL_DEBUG
:
tmp
);
g_logger
.
disable
(
Logger
::
LL_ALL
);
g_logger
.
enable
(
Logger
::
LL_ON
);
g_logger
.
enable
((
Logger
::
LoggerLevel
)
tmp
,
Logger
::
LL_ALERT
);
}
g_default_user
=
strdup
(
getenv
(
"LOGNAME"
));
return
true
;
}
static
atrt_host
*
find
(
const
BaseString
&
host
,
Vector
<
atrt_host
>
&
hosts
){
for
(
size_t
i
=
0
;
i
<
hosts
.
size
();
i
++
){
if
(
hosts
[
i
].
m_hostname
==
host
){
return
&
hosts
[
i
];
}
if
(
!
g_basedir
)
{
g_basedir
=
g_cwd
;
g_logger
.
info
(
"basedir not specified, using %s"
,
g_basedir
);
}
return
0
;
}
bool
setup_config
(
atrt_config
&
config
){
FILE
*
f
=
fopen
(
g_process_config_filename
,
"r"
);
if
(
!
f
){
g_logger
.
critical
(
"Failed to open process config file: %s"
,
g_process_config_filename
);
return
false
;
if
(
!
g_prefix
)
{
g_prefix
=
DEFAULT_PREFIX
;
}
bool
result
=
true
;
int
lineno
=
0
;
char
buf
[
2048
];
BaseString
connect_string
;
int
mysql_port_offset
=
0
;
while
(
fgets
(
buf
,
2048
,
f
)){
lineno
++
;
BaseString
tmp
(
buf
);
tmp
.
trim
(
"
\t\n\r
"
);
if
(
tmp
.
length
()
==
0
||
tmp
==
""
||
tmp
.
c_str
()[
0
]
==
'#'
)
continue
;
Vector
<
BaseString
>
split1
;
if
(
tmp
.
split
(
split1
,
":"
,
2
)
!=
2
){
g_logger
.
warning
(
"Invalid line %d in %s - ignoring"
,
lineno
,
g_process_config_filename
);
continue
;
/**
* Add path to atrt-*.sh
*/
{
BaseString
tmp
;
const
char
*
env
=
getenv
(
"PATH"
);
if
(
env
&&
strlen
(
env
))
{
tmp
.
assfmt
(
"PATH=%s:%s/mysql-test/ndb"
,
env
,
g_prefix
);
}
if
(
split1
[
0
].
trim
()
==
"basedir"
){
g_default_base_dir
=
strdup
(
split1
[
1
].
trim
().
c_str
());
continue
;
else
{
tmp
.
assfmt
(
"PATH=%s/mysql-test/ndb"
,
g_prefix
);
}
g_env_path
=
strdup
(
tmp
.
c_str
());
putenv
(
g_env_path
);
}
if
(
g_help
)
{
my_print_help
(
g_options
);
my_print_variables
(
g_options
);
return
0
;
}
if
(
split1
[
0
].
trim
()
==
"baseport"
){
g_default_base_port
=
atoi
(
split1
[
1
].
trim
().
c_str
());
continue
;
if
(
g_test_case_filename
)
{
g_test_case_file
=
fopen
(
g_test_case_filename
,
"r"
);
if
(
g_test_case_file
==
0
)
{
g_logger
.
critical
(
"Unable to open file: %s"
,
g_test_case_filename
);
return
false
;
}
if
(
g_do_setup
==
0
)
g_do_setup
=
2
;
if
(
g_do_start
==
0
)
g_do_start
=
p_ndb
|
p_servers
;
if
(
g_mode
==
0
)
g_mode
=
1
;
if
(
split1
[
0
].
trim
()
==
"user"
){
g_default_user
=
strdup
(
split1
[
1
].
trim
().
c_str
());
continue
;
if
(
g_do_sshx
)
{
g_logger
.
critical
(
"ssx specified...not possible with testfile"
);
return
false
;
}
if
(
split1
[
0
].
trim
()
==
"mysqld-use-base"
&&
split1
[
1
].
trim
()
==
"no"
){
g_mysqld_use_base
=
0
;
continue
;
}
if
(
g_do_setup
==
0
)
{
BaseString
tmp
;
tmp
.
append
(
g_basedir
);
tmp
.
append
(
PATH_SEPARATOR
);
tmp
.
append
(
"my.cnf"
);
if
(
lstat
(
tmp
.
c_str
(),
&
sbuf
)
!=
0
)
{
g_logger
.
error
(
"Unable to stat %s"
,
tmp
.
c_str
());
return
false
;
}
Vector
<
BaseString
>
hosts
;
if
(
split1
[
1
].
trim
().
split
(
hosts
)
<=
0
)
{
g_logger
.
warning
(
"Invalid line %d in %s - ignoring"
,
lineno
,
g_process_config_filename
)
;
if
(
      !S_ISREG(sbuf.st_mode)){
    g_logger.error("%s is not a regular file", tmp.c_str());
    return false;
  }

  // 1 - Check hosts
  for (size_t i = 0; i < hosts.size(); i++){
    Vector<BaseString> tmp;
    hosts[i].split(tmp, ":");
    BaseString hostname = tmp[0].trim();
    BaseString base_dir;
    if (tmp.size() >= 2)
      base_dir = tmp[1];
    else if (g_default_base_dir == 0){
      g_logger.critical("Basedir not specified...");
      return false;
    }

    atrt_host * host_ptr;
    if ((host_ptr = find(hostname, config.m_hosts)) == 0){
      atrt_host host;
      host.m_index = config.m_hosts.size();
      host.m_cpcd = new SimpleCpcClient(hostname.c_str(), 1234);
      host.m_base_dir = (base_dir.empty() ? g_default_base_dir : base_dir);
      host.m_user = g_default_user;
      host.m_hostname = hostname.c_str();
      config.m_hosts.push_back(host);
    } else {
      if (!base_dir.empty() && (base_dir == host_ptr->m_base_dir)){
        g_logger.critical("Inconsistent base dir definition for host %s"
                          ", \"%s\" != \"%s\"",
                          hostname.c_str(), base_dir.c_str(),
                          host_ptr->m_base_dir.c_str());
        return false;
      }
    }
  }

  for (size_t i = 0; i < hosts.size(); i++){
    BaseString & tmp = hosts[i];
    atrt_host * host = find(tmp, config.m_hosts);
    BaseString & dir = host->m_base_dir;

    const int index = config.m_processes.size() + 1;

    atrt_process proc;
    proc.m_index = index;
    proc.m_host = host;
    proc.m_proc.m_id = -1;
    proc.m_proc.m_type = "temporary";
    proc.m_proc.m_owner = "atrt";
    proc.m_proc.m_group = "group";
    proc.m_proc.m_cwd.assign(dir).append("/run/");
    proc.m_proc.m_stdout = "log.out";
    proc.m_proc.m_stderr = "2>&1";
    proc.m_proc.m_runas = proc.m_host->m_user;
    proc.m_proc.m_ulimit = "c:unlimited";
    proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
    proc.m_proc.m_shutdown_options = "";
    proc.m_hostname = proc.m_host->m_hostname;
    proc.m_ndb_mgm_port = g_default_base_port;
    if (split1[0] == "mgm"){
      proc.m_type = atrt_process::NDB_MGM;
      proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_mgmd");
      proc.m_proc.m_path.assign(dir).append("/libexec/ndb_mgmd");
      proc.m_proc.m_args = "--nodaemon -f config.ini";
      proc.m_proc.m_cwd.appfmt("%d.ndb_mgmd", index);
      connect_string.appfmt("host=%s:%d;",
                            proc.m_hostname.c_str(), proc.m_ndb_mgm_port);
    } else if (split1[0] == "ndb"){
      proc.m_type = atrt_process::NDB_DB;
      proc.m_proc.m_name.assfmt("%d-%s", index, "ndbd");
      proc.m_proc.m_path.assign(dir).append("/libexec/ndbd");
      proc.m_proc.m_args = "--initial --nodaemon -n";
      proc.m_proc.m_cwd.appfmt("%d.ndbd", index);
    } else if (split1[0] == "mysqld"){
      proc.m_type = atrt_process::MYSQL_SERVER;
      proc.m_proc.m_name.assfmt("%d-%s", index, "mysqld");
      proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
      proc.m_proc.m_args = "--core-file --ndbcluster";
      proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
      proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
    } else if (split1[0] == "api"){
      proc.m_type = atrt_process::NDB_API;
      proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
      proc.m_proc.m_path = "";
      proc.m_proc.m_args = "";
      proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
    } else if (split1[0] == "mysql"){
      proc.m_type = atrt_process::MYSQL_CLIENT;
      proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
      proc.m_proc.m_path = "";
      proc.m_proc.m_args = "";
      proc.m_proc.m_cwd.appfmt("%d.mysql", index);
    } else {
      g_logger.critical("%s:%d: Unhandled process type: %s",
                        g_process_config_filename, lineno,
                        split1[0].c_str());
      result = false;
      goto end;
    }
    config.m_processes.push_back(proc);
    g_my_cnf = strdup(tmp.c_str());
    g_logger.info("Using %s", tmp.c_str());
  } else {
    g_my_cnf = strdup(mycnf.c_str());
  }

  g_logger.info("Using --prefix=\"%s\"", g_prefix);

  if (g_report_filename)
  {
    g_report_file = fopen(g_report_filename, "w");
    if (g_report_file == 0)
    {
      g_logger.critical("Unable to create report file: %s", g_report_filename);
      return false;
    }
  }

  // Setup connect string
  for (size_t i = 0; i < config.m_processes.size(); i++)
  {
    config.m_processes[i].m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s",
                                              connect_string.c_str());
  if (g_clusters == 0)
  {
    g_logger.critical("No clusters specified");
    return false;
  }

 end:
  fclose(f);
  return result;
  g_user = strdup(getenv("LOGNAME"));
  return true;
}
bool
connect_hosts(atrt_config& config){
  for (size_t i = 0; i < config.m_hosts.size(); i++){
    if (config.m_hosts[i].m_cpcd->connect() != 0){
    if (config.m_hosts[i]->m_cpcd->connect() != 0){
      g_logger.error("Unable to connect to cpc %s:%d",
                     config.m_hosts[i].m_cpcd->getHost(),
                     config.m_hosts[i].m_cpcd->getPort());
                     config.m_hosts[i]->m_cpcd->getHost(),
                     config.m_hosts[i]->m_cpcd->getPort());
      return false;
    }
    g_logger.debug("Connected to %s:%d",
                   config.m_hosts[i].m_cpcd->getHost(),
                   config.m_hosts[i].m_cpcd->getPort());
                   config.m_hosts[i]->m_cpcd->getHost(),
                   config.m_hosts[i]->m_cpcd->getPort());
  }
  return true;
...
...
@@ -529,8 +620,10 @@ connect_ndb_mgm(atrt_process & proc){
    g_logger.critical("Unable to create mgm handle");
    return false;
  }
  BaseString tmp = proc.m_hostname;
  tmp.appfmt(":%d", proc.m_ndb_mgm_port);
  BaseString tmp = proc.m_host->m_hostname;
  const char * val;
  proc.m_options.m_loaded.get("--PortNumber=", &val);
  tmp.appfmt(":%s", val);
  if (ndb_mgm_set_connectstring(handle, tmp.c_str()))
  {
...
...
@@ -551,8 +644,8 @@ connect_ndb_mgm(atrt_process & proc){
bool
connect_ndb_mgm(atrt_config& config){
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    if ((proc.m_type & atrt_process::NDB_MGM) != 0){
    atrt_process & proc = *config.m_processes[i];
    if ((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
      if (!connect_ndb_mgm(proc)){
        return false;
      }
...
...
@@ -573,100 +666,110 @@ wait_ndb(atrt_config& config, int goal){
  goal = remap(goal);

  /**
   * Get mgm handle for cluster
   */
  NdbMgmHandle handle = 0;
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    if ((proc.m_type & atrt_process::NDB_MGM) != 0){
      handle = proc.m_ndb_mgm_handle;
      break;
    }
  }
  if (handle == 0){
    g_logger.critical("Unable to find mgm handle");
    return false;
  }
  if (goal == NDB_MGM_NODE_STATUS_STARTED){
  size_t cnt = 0;
  for (size_t i = 0; i < config.m_clusters.size(); i++)
  {
    atrt_cluster * cluster = config.m_clusters[i];
    /**
     * 1) wait NOT_STARTED
     * 2) send start
     * 3) wait STARTED
     * Get mgm handle for cluster
     */
    if (!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
    NdbMgmHandle handle = 0;
    for (size_t j = 0; j < cluster->m_processes.size(); j++){
      atrt_process & proc = *cluster->m_processes[j];
      if ((proc.m_type & atrt_process::AP_NDB_MGMD) != 0){
        handle = proc.m_ndb_mgm_handle;
        break;
      }
    }
    if (handle == 0){
      g_logger.critical("Unable to find mgm handle");
      return false;
    }
    ndb_mgm_start(handle, 0, 0);
  }
  struct ndb_mgm_cluster_state * state;
  time_t now = time(0);
  time_t end = now + 360;
  int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
  int min2 = goal;
  while (now < end){
    /**
     * 1) retreive current state
     */
    state = 0;
    do {
      state = ndb_mgm_get_status(handle);
      if (state == 0){
        const int err = ndb_mgm_get_latest_error(handle);
        g_logger.error("Unable to poll db state: %d %s %s",
                       ndb_mgm_get_latest_error(handle),
                       ndb_mgm_get_latest_error_msg(handle),
                       ndb_mgm_get_latest_error_desc(handle));
        if (err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
          g_logger.error("Reconnected...");
          continue;
        }
    if (goal == NDB_MGM_NODE_STATUS_STARTED){
      /**
       * 1) wait NOT_STARTED
       * 2) send start
       * 3) wait STARTED
       */
      if (!wait_ndb(config, NDB_MGM_NODE_STATUS_NOT_STARTED))
        return false;
      }
    } while (state == 0);
    NdbAutoPtr<void> tmp(state);
    ndb_mgm_start(handle, 0, 0);
  }
  struct ndb_mgm_cluster_state * state;
  time_t now = time(0);
  time_t end = now + 360;
  int min = remap(NDB_MGM_NODE_STATUS_NO_CONTACT);
  int min2 = goal;
    min2 = goal;
    for (int i = 0; i < state->no_of_nodes; i++){
      if (state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB){
        const int s = remap(state->node_states[i].node_status);
        min2 = (min2 < s ? min2 : s);
        if (s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
            s > NDB_MGM_NODE_STATUS_STARTED){
          g_logger.critical("Strange DB status during start: %d %d", i, min2);
  while (now < end){
    /**
     * 1) retreive current state
     */
    state = 0;
    do {
      state = ndb_mgm_get_status(handle);
      if (state == 0){
        const int err = ndb_mgm_get_latest_error(handle);
        g_logger.error("Unable to poll db state: %d %s %s",
                       ndb_mgm_get_latest_error(handle),
                       ndb_mgm_get_latest_error_msg(handle),
                       ndb_mgm_get_latest_error_desc(handle));
        if (err == NDB_MGM_SERVER_NOT_CONNECTED && connect_ndb_mgm(config)){
          g_logger.error("Reconnected...");
          continue;
        }
        return false;
      }
        if (min2 < min){
          g_logger.critical("wait ndb failed node: %d %d %d %d",
                            state->node_states[i].node_id, min, min2, goal);
        }
    } while (state == 0);
    NdbAutoPtr<void> tmp(state);

    min2 = goal;
    for (int j = 0; j < state->no_of_nodes; j++){
      if (state->node_states[j].node_type == NDB_MGM_NODE_TYPE_NDB){
        const int s = remap(state->node_states[j].node_status);
        min2 = (min2 < s ? min2 : s);
        if (s < remap(NDB_MGM_NODE_STATUS_NO_CONTACT) ||
            s > NDB_MGM_NODE_STATUS_STARTED){
          g_logger.critical("Strange DB status during start: %d %d", j, min2);
          return false;
        }
        if (min2 < min){
          g_logger.critical("wait ndb failed node: %d %d %d %d",
                            state->node_states[j].node_id, min, min2, goal);
        }
      }
    }
    if (min2 < min){
      g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
      return false;
    }
    if (min2 == goal){
      cnt++;
      goto next;
    }
    min = min2;
    now = time(0);
  }
  if (min2 < min){
    g_logger.critical("wait ndb failed %d %d %d", min, min2, goal);
    return false;
  }
  if (min2 == goal){
    return true;
    break;
  }
  min = min2;
  now = time(0);
  g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
  break;
next:
  ;
  }
  g_logger.critical("wait ndb timed out %d %d %d", min, min2, goal);
  return false;
  return cnt == config.m_clusters.size();
}
bool
...
...
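The wait_ndb() hunk above leans entirely on the public NDB MGM API (ndb_mgm_create_handle, ndb_mgm_get_status and friends). As a point of reference, here is a minimal, self-contained sketch of that polling pattern outside atrt; the connect string literal and the output format are illustrative only and are not part of this commit:

#include <mgmapi.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: connect once and print the status of every data node.
 * "mgm-host:1186" below would be replaced by a real connect string. */
int print_db_status(const char * connectstring)
{
  NdbMgmHandle handle = ndb_mgm_create_handle();
  if (handle == 0)
    return -1;
  if (ndb_mgm_set_connectstring(handle, connectstring) != 0 ||
      ndb_mgm_connect(handle, 0, 0, 0) != 0)
  {
    ndb_mgm_destroy_handle(&handle);
    return -1;
  }
  struct ndb_mgm_cluster_state * state = ndb_mgm_get_status(handle);
  if (state == 0)
  {
    printf("error: %s\n", ndb_mgm_get_latest_error_msg(handle));
    ndb_mgm_destroy_handle(&handle);
    return -1;
  }
  for (int i = 0; i < state->no_of_nodes; i++)
  {
    if (state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB)
      printf("node %d status %d\n",
             state->node_states[i].node_id,
             state->node_states[i].node_status);
  }
  free(state);                      /* status struct is heap allocated */
  ndb_mgm_destroy_handle(&handle);
  return 0;
}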
@@ -676,21 +779,19 @@ start_process(atrt_process & proc){
    return false;
  }

  BaseString path = proc.m_proc.m_cwd.substr(proc.m_host->m_base_dir.length() +
                                             BaseString("/run").length());

  BaseString tmp = g_setup_progname;
  tmp.appfmt(" %s %s/%s/%s",
  tmp.appfmt(" %s %s/ %s",
             proc.m_host->m_hostname.c_str(),
             g_setup_path,
             path.c_str(),
             proc.m_proc.m_cwd.c_str(),
             proc.m_proc.m_cwd.c_str());

  g_logger.debug("system(%s)", tmp.c_str());
  const int r1 = system(tmp.c_str());
  if (r1 != 0){
    g_logger.critical("Failed to setup process");
    return false;
  }

  {
    Properties reply;
    if (proc.m_host->m_cpcd->define_process(proc.m_proc, reply) != 0){
...
...
@@ -715,7 +816,7 @@ start_process(atrt_process & proc){
bool
start_processes(atrt_config & config, int types){
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    atrt_process & proc = *config.m_processes[i];
    if ((types & proc.m_type) != 0 && proc.m_proc.m_path != ""){
      if (!start_process(proc)){
        return false;
...
...
@@ -760,7 +861,7 @@ stop_process(atrt_process & proc){
bool
stop_processes(atrt_config & config, int types){
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    atrt_process & proc = *config.m_processes[i];
    if ((types & proc.m_type) != 0){
      if (!stop_process(proc)){
        return false;
...
...
@@ -779,11 +880,11 @@ update_status(atrt_config& config, int){
  m_procs.fill(config.m_hosts.size(), dummy);
  for (size_t i = 0; i < config.m_hosts.size(); i++){
    Properties p;
    config.m_hosts[i].m_cpcd->list_processes(m_procs[i], p);
    config.m_hosts[i]->m_cpcd->list_processes(m_procs[i], p);
  }

  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    atrt_process & proc = *config.m_processes[i];
    if (proc.m_proc.m_id != -1){
      Vector<SimpleCpcClient::Process> & h_procs = m_procs[proc.m_host->m_index];
      bool found = false;
...
...
@@ -798,7 +899,7 @@ update_status(atrt_config& config, int){
      g_logger.error("update_status: not found");
      g_logger.error("id: %d host: %s cmd: %s",
                     proc.m_proc.m_id,
                     proc.m_hostname.c_str(),
                     proc.m_host->m_hostname.c_str(),
                     proc.m_proc.m_path.c_str());
      for (size_t j = 0; j < h_procs.size(); j++){
        g_logger.error("found: %d %s", h_procs[j].m_id,
...
@@ -815,7 +916,7 @@ int
is_running(atrt_config & config, int types){
  int found = 0, running = 0;
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    atrt_process & proc = *config.m_processes[i];
    if ((types & proc.m_type) != 0){
      found++;
      if (proc.m_proc.m_status == "running")
...
...
@@ -910,12 +1011,24 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
    tc.m_run_all = true;
  else
    tc.m_run_all = false;

  if (!p.get("name", &mt))
  {
    tc.m_name.assfmt("%s %s",
                     tc.m_command.c_str(),
                     tc.m_args.c_str());
  }
  else
  {
    tc.m_name.assign(mt);
  }
  return true;
}

bool
setup_test_case(atrt_config& config, const atrt_testcase& tc){
  g_logger.debug("system(%s)", g_clear_progname);
  const int r1 = system(g_clear_progname);
  if (r1 != 0){
    g_logger.critical("Failed to clear result");
...
...
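For reference, read_test_case() above consumes simple key/value stanzas like the ones in test-tests.txt further down in this commit; the optional name property gives a test case a display name, otherwise the command plus its arguments are used. A hypothetical stanza, shown only to illustrate the keys the parser looks up (the name value here is made up):

max-time: 600
cmd: testBasic
args: -n PkRead T1
name: basic pk read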
@@ -923,19 +1036,24 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
  }

  size_t i = 0;
  for (; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    if (proc.m_type == atrt_process::NDB_API ||
        proc.m_type == atrt_process::MYSQL_CLIENT){
      proc.m_proc.m_path.assfmt("%s/bin/%s",
                                proc.m_host->m_base_dir.c_str(),
                                tc.m_command.c_str());
  for (; i < config.m_processes.size(); i++)
  {
    atrt_process & proc = *config.m_processes[i];
    if (proc.m_type == atrt_process::AP_NDB_API ||
        proc.m_type == atrt_process::AP_CLIENT){
      proc.m_proc.m_path = "";
      if (tc.m_command.c_str()[0] != '/')
      {
        proc.m_proc.m_path.appfmt("%s/bin/", g_prefix);
      }
      proc.m_proc.m_path.append(tc.m_command.c_str());
      proc.m_proc.m_args.assign(tc.m_args);
      if (!tc.m_run_all)
        break;
    }
  }

  for (i++; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    if (proc.m_type == atrt_process::NDB_API ||
        proc.m_type == atrt_process::MYSQL_CLIENT){
    atrt_process & proc = *config.m_processes[i];
    if (proc.m_type == atrt_process::AP_NDB_API ||
        proc.m_type == atrt_process::AP_CLIENT){
      proc.m_proc.m_path.assign("");
      proc.m_proc.m_args.assign("");
    }
...
...
@@ -946,24 +1064,27 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
bool
gather_result(atrt_config& config, int * result){
  BaseString tmp = g_gather_progname;
  for (size_t i = 0; i < config.m_processes.size(); i++){
    atrt_process & proc = config.m_processes[i];
    if (proc.m_proc.m_path != ""){
      tmp.appfmt(" %s:%s",
                 proc.m_hostname.c_str(),
                 proc.m_proc.m_cwd.c_str());
  for (size_t i = 0; i < config.m_hosts.size(); i++)
  {
    tmp.appfmt(" %s:%s/*",
               config.m_hosts[i]->m_hostname.c_str(),
               config.m_hosts[i]->m_basedir.c_str());

    g_logger.debug("system(%s)", tmp.c_str());
    const int r1 = system(tmp.c_str());
    if (r1 != 0)
    {
      g_logger.critical("Failed to gather result!");
      return false;
    }
  }

  const int r1 = system(tmp.c_str());
  if (r1 != 0){
    g_logger.critical("Failed to gather result");
    return false;
  }

  g_logger.debug("system(%s)", g_analyze_progname);
  const int r2 = system(g_analyze_progname);
  if (r2 == -1 || r2 == (127 << 8)){
  if (r2 == -1 || r2 == (127 << 8))
  {
    g_logger.critical("Failed to analyze results");
    return false;
  }
...
...
@@ -974,6 +1095,7 @@ gather_result(atrt_config& config, int * result){
bool
setup_hosts(atrt_config& config){
  g_logger.debug("system(%s)", g_clear_progname);
  const int r1 = system(g_clear_progname);
  if (r1 != 0){
    g_logger.critical("Failed to clear result");
...
...
@@ -982,21 +1104,143 @@ setup_hosts(atrt_config& config){
  for (size_t i = 0; i < config.m_hosts.size(); i++){
    BaseString tmp = g_setup_progname;
    tmp.appfmt(" %s %s/ %s/run",
               config.m_hosts[i].m_hostname.c_str(),
               g_setup_path,
               config.m_hosts[i].m_base_dir.c_str());
    tmp.appfmt(" %s %s/ %s/",
               config.m_hosts[i]->m_hostname.c_str(),
               g_basedir,
               config.m_hosts[i]->m_basedir.c_str());

    g_logger.debug("system(%s)", tmp.c_str());
    const int r1 = system(tmp.c_str());
    if (r1 != 0){
      g_logger.critical("Failed to setup %s",
                        config.m_hosts[i].m_hostname.c_str());
                        config.m_hosts[i]->m_hostname.c_str());
      return false;
    }
  }
  return true;
}

bool
deploy(atrt_config & config)
{
  for (size_t i = 0; i < config.m_hosts.size(); i++)
  {
    BaseString tmp = g_setup_progname;
    tmp.appfmt(" %s %s/ %s",
               config.m_hosts[i]->m_hostname.c_str(),
               g_prefix,
               g_prefix);

    g_logger.info("rsyncing %s to %s", g_prefix,
                  config.m_hosts[i]->m_hostname.c_str());
    g_logger.debug("system(%s)", tmp.c_str());
    const int r1 = system(tmp.c_str());
    if (r1 != 0)
    {
      g_logger.critical("Failed to rsync %s to %s",
                        g_prefix,
                        config.m_hosts[i]->m_hostname.c_str());
      return false;
    }
  }

  return true;
}

bool
sshx(atrt_config & config, unsigned mask)
{
  for (size_t i = 0; i < config.m_processes.size(); i++)
  {
    atrt_process & proc = *config.m_processes[i];

    BaseString tmp;
    const char * type = 0;
    switch(proc.m_type){
    case atrt_process::AP_NDB_MGMD:
      type = (mask & proc.m_type) ? "ndb_mgmd" : 0;
      break;
    case atrt_process::AP_NDBD:
      type = (mask & proc.m_type) ? "ndbd" : 0;
      break;
    case atrt_process::AP_MYSQLD:
      type = (mask & proc.m_type) ? "mysqld" : 0;
      break;
    case atrt_process::AP_NDB_API:
      type = (mask & proc.m_type) ? "ndbapi" : 0;
      break;
    case atrt_process::AP_CLIENT:
      type = (mask & proc.m_type) ? "client" : 0;
      break;
    default:
      type = "<unknown>";
    }

    if (type == 0)
      continue;

    tmp.appfmt("xterm -fg black -title \"%s(%s) on %s\""
               " -e 'ssh -t -X %s sh %s/ssh-login.sh' &",
               type,
               proc.m_cluster->m_name.c_str(),
               proc.m_host->m_hostname.c_str(),
               proc.m_host->m_hostname.c_str(),
               proc.m_proc.m_cwd.c_str());

    g_logger.debug("system(%s)", tmp.c_str());
    const int r1 = system(tmp.c_str());
    if (r1 != 0)
    {
      g_logger.critical("Failed sshx (%s)", tmp.c_str());
      return false;
    }
    NdbSleep_MilliSleep(300); // To prevent xlock problem
  }

  return true;
}

bool
start(atrt_config & config, unsigned proc_mask)
{
  if (proc_mask & atrt_process::AP_NDB_MGMD)
    if (!start_processes(g_config, atrt_process::AP_NDB_MGMD))
      return false;

  if (proc_mask & atrt_process::AP_NDBD)
  {
    if (!connect_ndb_mgm(g_config)){
      return false;
    }

    if (!start_processes(g_config, atrt_process::AP_NDBD))
      return false;

    if (!wait_ndb(g_config, NDB_MGM_NODE_STATUS_NOT_STARTED))
      return false;

    for (Uint32 i = 0; i < 3; i++)
      if (wait_ndb(g_config, NDB_MGM_NODE_STATUS_STARTED))
        goto started;
    return false;
  }

started:
  if (!start_processes(g_config, p_servers & proc_mask))
    return false;

  return true;
}

void
require(bool x)
{
  if (!x)
    abort();
}

template class Vector<Vector<SimpleCpcClient::Process> >;
template class Vector<atrt_host>;
template class Vector<atrt_process>;
template class Vector<atrt_host*>;
template class Vector<atrt_cluster*>;
template class Vector<atrt_process*>;
storage/ndb/test/run-test/setup.cpp
0 → 100644
View file @
294df8e7
#include "atrt.hpp"
#include <ndb_global.h>
#include <my_sys.h>
#include <my_getopt.h>
#include <NdbOut.hpp>
static NdbOut & operator<<(NdbOut& out, const atrt_process& proc);
static atrt_host * find(const char * hostname, Vector<atrt_host*>&);
static bool load_process(atrt_config&, atrt_cluster&, atrt_process::Type,
                         size_t idx, const char * hostname);
static bool load_options(int argc, char** argv, int type, atrt_options&);

enum {
  PO_NDB = atrt_options::AO_NDBCLUSTER,
  PO_REP_SLAVE = 256,
  PO_REP_MASTER = 512,
  PO_REP = (atrt_options::AO_REPLICATION | PO_REP_SLAVE | PO_REP_MASTER)
};

struct proc_option
{
  const char * name;
  int type;
  int options;
};

static
struct proc_option f_options[] = {
  { "--FileSystemPath=", atrt_process::AP_NDBD, 0 }
  ,{ "--PortNumber=", atrt_process::AP_NDB_MGMD, 0 }
  ,{ "--datadir=", atrt_process::AP_MYSQLD, 0 }
  ,{ "--socket=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
  ,{ "--port=", atrt_process::AP_MYSQLD | atrt_process::AP_CLIENT, 0 }
  ,{ "--server-id=", atrt_process::AP_MYSQLD, PO_REP }
  ,{ "--log-bin", atrt_process::AP_MYSQLD, PO_REP_MASTER }
  ,{ "--master-host=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
  ,{ "--master-port=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
  ,{ "--master-user=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
  ,{ "--master-password=", atrt_process::AP_MYSQLD, PO_REP_SLAVE }
  ,{ "--ndb-connectstring=", atrt_process::AP_MYSQLD | atrt_process::AP_CLUSTER, PO_NDB }
  ,{ "--ndbcluster", atrt_process::AP_MYSQLD, PO_NDB }
  ,{ 0, 0, 0 }
};
const char * ndbcs = "--ndb-connectstring=";

bool
setup_config(atrt_config& config)
{
  BaseString tmp(g_clusters);
  Vector<BaseString> clusters;
  tmp.split(clusters, ",");

  bool fqpn = clusters.size() > 1 || g_fqpn;

  size_t j, k;
  for (size_t i = 0; i < clusters.size(); i++)
  {
    struct atrt_cluster * cluster = new atrt_cluster;
    config.m_clusters.push_back(cluster);

    cluster->m_name = clusters[i];
    if (fqpn)
    {
      cluster->m_dir.assfmt("cluster%s/", cluster->m_name.c_str());
    }
    else
    {
      cluster->m_dir = "";
    }

    int argc = 1;
    const char * argv[] = { "atrt", 0, 0 };

    BaseString buf;
    buf.assfmt("--defaults-group-suffix=%s", clusters[i].c_str());
    argv[argc++] = buf.c_str();
    char ** tmp = (char**)argv;
    const char * groups[] = { "cluster_config", 0 };
    int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
    if (ret)
    {
      g_logger.error("Unable to load defaults for cluster: %s",
                     clusters[i].c_str());
      return false;
    }

    struct
    {
      atrt_process::Type type;
      const char * name;
      const char * value;
    } proc_args[] = {
      { atrt_process::AP_NDB_MGMD, "--ndb_mgmd=", 0 },
      { atrt_process::AP_NDBD, "--ndbd=", 0 },
      { atrt_process::AP_NDB_API, "--ndbapi=", 0 },
      { atrt_process::AP_NDB_API, "--api=", 0 },
      { atrt_process::AP_MYSQLD, "--mysqld=", 0 },
      { atrt_process::AP_ALL, 0, 0 }
    };

    /**
     * Find all processes...
     */
    for (j = 0; j < (size_t)argc; j++)
    {
      for (k = 0; proc_args[k].name; k++)
      {
        if (!strncmp(tmp[j], proc_args[k].name, strlen(proc_args[k].name)))
        {
          proc_args[k].value = tmp[j] + strlen(proc_args[k].name);
          break;
        }
      }
    }

    /**
     * Load each process
     */
    for (j = 0; proc_args[j].name; j++)
    {
      if (proc_args[j].value)
      {
        BaseString tmp(proc_args[j].value);
        Vector<BaseString> list;
        tmp.split(list, ",");
        for (k = 0; k < list.size(); k++)
          if (!load_process(config, *cluster, proc_args[j].type,
                            k + 1, list[k].c_str()))
            return false;
      }
    }

    {
      /**
       * Load cluster options
       */
      argc = 1;
      argv[argc++] = buf.c_str();
      const char * groups[] = { "mysql_cluster", 0 };
      ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
      if (ret)
      {
        g_logger.error("Unable to load defaults for cluster: %s",
                       clusters[i].c_str());
        return false;
      }

      load_options(argc, tmp, atrt_process::AP_CLUSTER, cluster->m_options);
    }
  }
  return true;
}
static
atrt_host *
find(const char * hostname, Vector<atrt_host*> & hosts){
  for (size_t i = 0; i < hosts.size(); i++){
    if (hosts[i]->m_hostname == hostname){
      return hosts[i];
    }
  }

  atrt_host * host = new atrt_host;
  host->m_index = hosts.size();
  host->m_cpcd = new SimpleCpcClient(hostname, 1234);
  host->m_basedir = g_basedir;
  host->m_user = g_user;
  host->m_hostname = hostname;
  hosts.push_back(host);
  return host;
}
static
bool
load_process(atrt_config& config, atrt_cluster& cluster,
             atrt_process::Type type,
             size_t idx, const char * hostname)
{
  atrt_host * host_ptr = find(hostname, config.m_hosts);
  atrt_process * proc_ptr = new atrt_process;

  config.m_processes.push_back(proc_ptr);
  host_ptr->m_processes.push_back(proc_ptr);
  cluster.m_processes.push_back(proc_ptr);

  atrt_process & proc = *proc_ptr;

  const size_t proc_no = config.m_processes.size();
  proc.m_index = idx;
  proc.m_type = type;
  proc.m_host = host_ptr;
  proc.m_cluster = &cluster;
  proc.m_options.m_features = 0;
  proc.m_rep_src = 0;
  proc.m_proc.m_id = -1;
  proc.m_proc.m_type = "temporary";
  proc.m_proc.m_owner = "atrt";
  proc.m_proc.m_group = cluster.m_name.c_str();
  proc.m_proc.m_stdout = "log.out";
  proc.m_proc.m_stderr = "2>&1";
  proc.m_proc.m_runas = proc.m_host->m_user;
  proc.m_proc.m_ulimit = "c:unlimited";
  proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix);
  proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir);
  proc.m_proc.m_shutdown_options = "";

  int argc = 1;
  const char * argv[] = { "atrt", 0, 0 };

  BaseString buf[10];
  char ** tmp = (char**)argv;
  const char * groups[] = { 0, 0, 0, 0 };
  switch(type){
  case atrt_process::AP_NDB_MGMD:
    groups[0] = "cluster_config";
    buf[1].assfmt("cluster_config.ndb_mgmd.%d", idx);
    groups[1] = buf[1].c_str();
    buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
    argv[argc++] = buf[0].c_str();
    break;
  case atrt_process::AP_NDBD:
    groups[0] = "cluster_config";
    buf[1].assfmt("cluster_config.ndbd.%d", idx);
    groups[1] = buf[1].c_str();
    buf[0].assfmt("--defaults-group-suffix=%s", cluster.m_name.c_str());
    argv[argc++] = buf[0].c_str();
    break;
  case atrt_process::AP_MYSQLD:
    groups[0] = "mysqld";
    groups[1] = "mysql_cluster";
    buf[0].assfmt("--defaults-group-suffix=.%d%s", idx, cluster.m_name.c_str());
    argv[argc++] = buf[0].c_str();
    break;
  case atrt_process::AP_CLIENT:
    buf[0].assfmt("client.%d%s", idx, cluster.m_name.c_str());
    groups[0] = buf[0].c_str();
    break;
  case atrt_process::AP_NDB_API:
    break;
  default:
    g_logger.critical("Unhandled process type: %d", type);
    return false;
  }

  int ret = load_defaults(g_my_cnf, groups, &argc, &tmp);
  if (ret)
  {
    g_logger.error("Unable to load defaults for cluster: %s",
                   cluster.m_name.c_str());
    return false;
  }

  load_options(argc, tmp, type, proc.m_options);

  BaseString dir;
  dir.assfmt("%s/%s",
             proc.m_host->m_basedir.c_str(),
             cluster.m_dir.c_str());

  switch(type){
  case atrt_process::AP_NDB_MGMD:
  {
    proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_mgmd");
    proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndb_mgmd");
    proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
                              proc.m_host->m_basedir.c_str());
    proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
                              cluster.m_name.c_str());
    proc.m_proc.m_args.append(" --nodaemon --mycnf");
    proc.m_proc.m_cwd.assfmt("%sndb_mgmd.%d", dir.c_str(), proc.m_index);
    proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
                             cluster.m_name.c_str());
    break;
  }
  case atrt_process::AP_NDBD:
  {
    proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndbd");
    proc.m_proc.m_path.assign(g_prefix).append("/libexec/ndbd");
    proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
                              proc.m_host->m_basedir.c_str());
    proc.m_proc.m_args.appfmt(" --defaults-group-suffix=%s",
                              cluster.m_name.c_str());
    proc.m_proc.m_args.append(" --nodaemon -n");
    proc.m_proc.m_cwd.assfmt("%sndbd.%d", dir.c_str(), proc.m_index);
    proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
                             cluster.m_name.c_str());
    break;
  }
  case atrt_process::AP_MYSQLD:
  {
    proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysqld");
    proc.m_proc.m_path.assign(g_prefix).append("/libexec/mysqld");
    proc.m_proc.m_args.assfmt("--defaults-file=%s/my.cnf",
                              proc.m_host->m_basedir.c_str());
    proc.m_proc.m_args.appfmt(" --defaults-group-suffix=.%d%s",
                              proc.m_index, cluster.m_name.c_str());
    proc.m_proc.m_args.append(" --core-file");
    proc.m_proc.m_cwd.appfmt("%smysqld.%d", dir.c_str(), proc.m_index);
    proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
    proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
                             proc.m_index, cluster.m_name.c_str());
    break;
  }
  case atrt_process::AP_NDB_API:
  {
    proc.m_proc.m_name.assfmt("%d-%s", proc_no, "ndb_api");
    proc.m_proc.m_path = "";
    proc.m_proc.m_args = "";
    proc.m_proc.m_cwd.appfmt("%sndb_api.%d", dir.c_str(), proc.m_index);
    proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=%s",
                             cluster.m_name.c_str());
    break;
  }
  case atrt_process::AP_CLIENT:
  {
    proc.m_proc.m_name.assfmt("%d-%s", proc_no, "mysql");
    proc.m_proc.m_path = "";
    proc.m_proc.m_args = "";
    proc.m_proc.m_cwd.appfmt("%s/client.%d", dir.c_str(), proc.m_index);
    proc.m_proc.m_env.appfmt(" MYSQL_GROUP_SUFFIX=.%d%s",
                             proc.m_index, cluster.m_name.c_str());
    break;
  }
  case atrt_process::AP_ALL:
  case atrt_process::AP_CLUSTER:
    g_logger.critical("Unhandled process type: %d", proc.m_type);
    return false;
  }

  if (proc.m_proc.m_path.length())
  {
    proc.m_proc.m_env.appfmt(" CMD=\"%s", proc.m_proc.m_path.c_str());
    if (proc.m_proc.m_args.length())
      proc.m_proc.m_env.append(" ");
    proc.m_proc.m_env.append(proc.m_proc.m_args);
    proc.m_proc.m_env.append("\"");
  }

  if (type == atrt_process::AP_MYSQLD)
  {
    /**
     * Add a client for each mysqld
     */
    if (!load_process(config, cluster, atrt_process::AP_CLIENT, idx, hostname))
    {
      return false;
    }
  }

  if (type == atrt_process::AP_CLIENT)
  {
    proc.m_mysqld = cluster.m_processes[cluster.m_processes.size() - 2];
  }

  return true;
}
static
bool
load_options(int argc, char** argv, int type, atrt_options& opts)
{
  for (size_t i = 0; i < (size_t)argc; i++)
  {
    for (size_t j = 0; f_options[j].name; j++)
    {
      const char * name = f_options[j].name;
      const size_t len = strlen(name);
      if ((f_options[j].type & type) && strncmp(argv[i], name, len) == 0)
      {
        opts.m_loaded.put(name, argv[i] + len, true);
        break;
      }
    }
  }
  return true;
}
struct proc_rule_ctx
{
  int m_setup;
  atrt_config * m_config;
  atrt_host * m_host;
  atrt_cluster * m_cluster;
  atrt_process * m_process;
};

struct proc_rule
{
  int type;
  bool (* func)(Properties& prop, proc_rule_ctx&, int extra);
  int extra;
};

static bool pr_check_replication(Properties&, proc_rule_ctx&, int);
static bool pr_check_features(Properties&, proc_rule_ctx&, int);
static bool pr_fix_client(Properties&, proc_rule_ctx&, int);
static bool pr_proc_options(Properties&, proc_rule_ctx&, int);
static bool pr_fix_ndb_connectstring(Properties&, proc_rule_ctx&, int);
static bool pr_set_ndb_connectstring(Properties&, proc_rule_ctx&, int);
static bool pr_check_proc(Properties&, proc_rule_ctx&, int);

static
proc_rule f_rules[] =
{
  { atrt_process::AP_CLUSTER, pr_check_features, 0 }
  ,{ atrt_process::AP_MYSQLD, pr_check_replication, 0 }
  ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options,
     ~(PO_REP | PO_NDB) }
  ,{ (atrt_process::AP_ALL & ~atrt_process::AP_CLIENT), pr_proc_options, PO_REP }
  ,{ atrt_process::AP_CLIENT, pr_fix_client, 0 }
  ,{ atrt_process::AP_CLUSTER, pr_fix_ndb_connectstring, 0 }
  ,{ atrt_process::AP_MYSQLD, pr_set_ndb_connectstring, 0 }
  ,{ atrt_process::AP_ALL, pr_check_proc, 0 }
  ,{ 0, 0, 0 }
};

bool
configure(atrt_config& config, int setup)
{
  Properties props;

  for (size_t i = 0; f_rules[i].func; i++)
  {
    bool ok = true;
    proc_rule_ctx ctx;
    bzero(&ctx, sizeof(ctx));
    ctx.m_setup = setup;
    ctx.m_config = &config;

    for (size_t j = 0; j < config.m_clusters.size(); j++)
    {
      ctx.m_cluster = config.m_clusters[j];

      if (f_rules[i].type & atrt_process::AP_CLUSTER)
      {
        g_logger.debug("applying rule %d to cluster %s", i,
                       ctx.m_cluster->m_name.c_str());
        if (!(* f_rules[i].func)(props, ctx, f_rules[i].extra))
          ok = false;
      }
      else
      {
        atrt_cluster& cluster = *config.m_clusters[j];
        for (size_t k = 0; k < cluster.m_processes.size(); k++)
        {
          atrt_process& proc = *cluster.m_processes[k];
          ctx.m_process = cluster.m_processes[k];
          if (proc.m_type & f_rules[i].type)
          {
            g_logger.debug("applying rule %d to %s", i,
                           proc.m_proc.m_cwd.c_str());
            if (!(* f_rules[i].func)(props, ctx, f_rules[i].extra))
              ok = false;
          }
        }
      }
    }

    if (!ok)
    {
      return false;
    }
  }

  return true;
}
static
atrt_process*
find(atrt_config& config, int type, const char * name)
{
  BaseString tmp(name);
  Vector<BaseString> src;
  Vector<BaseString> dst;
  tmp.split(src, ".");

  if (src.size() != 2)
  {
    return 0;
  }
  atrt_cluster* cluster = 0;
  BaseString cl;
  cl.appfmt(".%s", src[1].c_str());
  for (size_t i = 0; i < config.m_clusters.size(); i++)
  {
    if (config.m_clusters[i]->m_name == cl)
    {
      cluster = config.m_clusters[i];
      break;
    }
  }

  if (cluster == 0)
  {
    return 0;
  }

  int idx = atoi(src[0].c_str()) - 1;
  for (size_t i = 0; i < cluster->m_processes.size(); i++)
  {
    if (cluster->m_processes[i]->m_type & type)
    {
      if (idx == 0)
        return cluster->m_processes[i];
      else
        idx--;
    }
  }

  return 0;
}
static
bool
pr_check_replication(Properties& props, proc_rule_ctx& ctx, int)
{
  if (! (ctx.m_config->m_replication == ""))
  {
    Vector<BaseString> list;
    ctx.m_config->m_replication.split(list, ";");
    atrt_config& config = *ctx.m_config;

    ctx.m_config->m_replication = "";

    const char * msg = "Invalid replication specification";
    for (size_t i = 0; i < list.size(); i++)
    {
      Vector<BaseString> rep;
      list[i].split(rep, ":");
      if (rep.size() != 2)
      {
        g_logger.error("%s: %s (split: %d)", msg, list[i].c_str(), rep.size());
        return false;
      }

      atrt_process* src = find(config, atrt_process::AP_MYSQLD, rep[0].c_str());
      atrt_process* dst = find(config, atrt_process::AP_MYSQLD, rep[1].c_str());

      if (src == 0 || dst == 0)
      {
        g_logger.error("%s: %s (%d %d)",
                       msg, list[i].c_str(), src != 0, dst != 0);
        return false;
      }

      if (dst->m_rep_src != 0)
      {
        g_logger.error("%s: %s : %s already has replication src (%s)",
                       msg,
                       list[i].c_str(),
                       dst->m_proc.m_cwd.c_str(),
                       dst->m_rep_src->m_proc.m_cwd.c_str());
        return false;
      }

      dst->m_rep_src = src;
      src->m_rep_dst.push_back(dst);

      src->m_options.m_features |= PO_REP_MASTER;
      dst->m_options.m_features |= PO_REP_SLAVE;
    }
  }
  return true;
}
static
bool
pr_check_features(Properties& props, proc_rule_ctx& ctx, int)
{
  int features = 0;
  atrt_cluster& cluster = *ctx.m_cluster;
  for (size_t i = 0; i < cluster.m_processes.size(); i++)
  {
    if (cluster.m_processes[i]->m_type == atrt_process::AP_NDB_MGMD ||
        cluster.m_processes[i]->m_type == atrt_process::AP_NDB_API ||
        cluster.m_processes[i]->m_type == atrt_process::AP_NDBD)
    {
      features |= atrt_options::AO_NDBCLUSTER;
      break;
    }
  }

  if (features)
  {
    cluster.m_options.m_features |= features;
    for (size_t i = 0; i < cluster.m_processes.size(); i++)
    {
      cluster.m_processes[i]->m_options.m_features |= features;
    }
  }
  return true;
}
static
bool
pr_fix_client(Properties& props, proc_rule_ctx& ctx, int)
{
  for (size_t i = 0; f_options[i].name; i++)
  {
    proc_option& opt = f_options[i];
    const char * name = opt.name;
    if (opt.type & atrt_process::AP_CLIENT)
    {
      const char * val;
      atrt_process& proc = *ctx.m_process;
      if (!proc.m_options.m_loaded.get(name, &val))
      {
        require(proc.m_mysqld->m_options.m_loaded.get(name, &val));
        proc.m_options.m_loaded.put(name, val);
        proc.m_options.m_generated.put(name, val);
      }
    }
  }
  return true;
}
static
Uint32
try_default_port(atrt_process& proc, const char * name)
{
  Uint32 port =
    strcmp(name, "--port=") == 0 ? 3306 :
    strcmp(name, "--PortNumber=") == 0 ? 1186 :
    0;

  atrt_host * host = proc.m_host;
  for (size_t i = 0; i < host->m_processes.size(); i++)
  {
    const char * val;
    if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
    {
      if ((Uint32)atoi(val) == port)
        return 0;
    }
  }
  return port;
}
static
bool
generate(atrt_process& proc, const char * name, Properties& props)
{
  atrt_options& opts = proc.m_options;
  if (strcmp(name, "--port=") == 0 ||
      strcmp(name, "--PortNumber=") == 0)
  {
    Uint32 val;
    if (g_default_ports == 0 || (val = try_default_port(proc, name)) == 0)
    {
      val = g_baseport;
      props.get("--PortNumber=", &val);
      props.put("--PortNumber=", (val + 1), true);
    }

    char buf[255];
    snprintf(buf, sizeof(buf), "%u", val);
    opts.m_loaded.put(name, buf);
    opts.m_generated.put(name, buf);
    return true;
  }
  else if (strcmp(name, "--datadir=") == 0)
  {
    opts.m_loaded.put(name, proc.m_proc.m_cwd.c_str());
    opts.m_generated.put(name, proc.m_proc.m_cwd.c_str());
    return true;
  }
  else if (strcmp(name, "--FileSystemPath=") == 0)
  {
    BaseString dir;
    dir.append(proc.m_host->m_basedir);
    dir.append("/");
    dir.append(proc.m_cluster->m_dir);
    opts.m_loaded.put(name, dir.c_str());
    opts.m_generated.put(name, dir.c_str());
    return true;
  }
  else if (strcmp(name, "--socket=") == 0)
  {
    const char * sock = 0;
    if (g_default_ports)
    {
      sock = "/tmp/mysql.sock";
      atrt_host * host = proc.m_host;
      for (size_t i = 0; i < host->m_processes.size(); i++)
      {
        const char * val;
        if (host->m_processes[i]->m_options.m_loaded.get(name, &val))
        {
          if (strcmp(sock, val) == 0)
          {
            sock = 0;
            break;
          }
        }
      }
    }

    BaseString tmp;
    if (sock == 0)
    {
      tmp.assfmt("%s/mysql.sock", proc.m_proc.m_cwd.c_str());
      sock = tmp.c_str();
    }

    opts.m_loaded.put(name, sock);
    opts.m_generated.put(name, sock);
    return true;
  }
  else if (strcmp(name, "--server-id=") == 0)
  {
    Uint32 val = 1;
    props.get(name, &val);
    char buf[255];
    snprintf(buf, sizeof(buf), "%u", val);
    opts.m_loaded.put(name, buf);
    opts.m_generated.put(name, buf);
    props.put(name, (val + 1), true);
    return true;
  }
  else if (strcmp(name, "--log-bin") == 0)
  {
    opts.m_loaded.put(name, "");
    opts.m_generated.put(name, "");
    return true;
  }
  else if (strcmp(name, "--master-host=") == 0)
  {
    require(proc.m_rep_src != 0);
    opts.m_loaded.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
    opts.m_generated.put(name, proc.m_rep_src->m_host->m_hostname.c_str());
    return true;
  }
  else if (strcmp(name, "--master-port=") == 0)
  {
    const char * val;
    require(proc.m_rep_src->m_options.m_loaded.get("--port=", &val));
    opts.m_loaded.put(name, val);
    opts.m_generated.put(name, val);
    return true;
  }
  else if (strcmp(name, "--master-user=") == 0)
  {
    opts.m_loaded.put(name, "root");
    opts.m_generated.put(name, "root");
    return true;
  }
  else if (strcmp(name, "--master-password=") == 0)
  {
    opts.m_loaded.put(name, "\"\"");
    opts.m_generated.put(name, "\"\"");
    return true;
  }

  g_logger.warning("Unknown parameter: %s", name);
  return true;
}
static
bool
pr_proc_options(Properties& props, proc_rule_ctx& ctx, int extra)
{
  for (size_t i = 0; f_options[i].name; i++)
  {
    proc_option& opt = f_options[i];
    atrt_process& proc = *ctx.m_process;
    const char * name = opt.name;
    if (opt.type & proc.m_type)
    {
      if (opt.options == 0 ||
          (opt.options & extra & proc.m_options.m_features))
      {
        const char * val;
        if (!proc.m_options.m_loaded.get(name, &val))
        {
          generate(proc, name, props);
        }
      }
    }
  }
  return true;
}
static
bool
pr_fix_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
{
  const char * val;
  atrt_cluster& cluster = *ctx.m_cluster;

  if (cluster.m_options.m_features & atrt_options::AO_NDBCLUSTER)
  {
    if (!cluster.m_options.m_loaded.get(ndbcs, &val))
    {
      /**
       * Construct connect string for this cluster
       */
      BaseString str;
      for (size_t i = 0; i < cluster.m_processes.size(); i++)
      {
        atrt_process* tmp = cluster.m_processes[i];
        if (tmp->m_type == atrt_process::AP_NDB_MGMD)
        {
          if (str.length())
          {
            str.append(";");
          }
          const char * port;
          require(tmp->m_options.m_loaded.get("--PortNumber=", &port));
          str.appfmt("%s:%s", tmp->m_host->m_hostname.c_str(), port);
        }
      }
      cluster.m_options.m_loaded.put(ndbcs, str.c_str());
      cluster.m_options.m_generated.put(ndbcs, str.c_str());
      cluster.m_options.m_loaded.get(ndbcs, &val);
    }

    for (size_t i = 0; i < cluster.m_processes.size(); i++)
    {
      cluster.m_processes[i]->m_proc.m_env.appfmt(" NDB_CONNECTSTRING=%s", val);
    }
  }
  return true;
}
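The value constructed here is the usual NDB connect string: every ndb_mgmd's hostname paired with its --PortNumber, joined by semicolons, then exported to each process through the environment. With two hypothetical management servers (hosts and ports invented for illustration) the exported variable would look like:

NDB_CONNECTSTRING=host-a:1186;host-b:1187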
static
bool
pr_set_ndb_connectstring(Properties& props, proc_rule_ctx& ctx, int)
{
  const char * val;

  atrt_process& proc = *ctx.m_process;
  if (proc.m_options.m_features & atrt_options::AO_NDBCLUSTER)
  {
    if (!proc.m_options.m_loaded.get(ndbcs, &val))
    {
      require(proc.m_cluster->m_options.m_loaded.get(ndbcs, &val));
      proc.m_options.m_loaded.put(ndbcs, val);
      proc.m_options.m_generated.put(ndbcs, val);
    }

    if (!proc.m_options.m_loaded.get("--ndbcluster", &val))
    {
      proc.m_options.m_loaded.put("--ndbcluster", "");
      proc.m_options.m_generated.put("--ndbcluster", "");
    }
  }
  return true;
}
static
bool
pr_check_proc(Properties& props, proc_rule_ctx& ctx, int)
{
  bool ok = true;
  bool generated = false;
  const int setup = ctx.m_setup;
  atrt_process& proc = *ctx.m_process;
  for (size_t i = 0; f_options[i].name; i++)
  {
    proc_option& opt = f_options[i];
    const char * name = opt.name;
    if ((ctx.m_process->m_type & opt.type) &&
        (opt.options == 0 ||
         (ctx.m_process->m_options.m_features & opt.options)))
    {
      const char * val;
      if (!proc.m_options.m_loaded.get(name, &val))
      {
        ok = false;
        g_logger.warning("Missing paramter: %s for %s",
                         name, proc.m_proc.m_cwd.c_str());
      }
      else if (proc.m_options.m_generated.get(name, &val))
      {
        if (setup == 0)
        {
          ok = false;
          g_logger.warning("Missing paramter: %s for %s",
                           name, proc.m_proc.m_cwd.c_str());
        }
        else
        {
          generated = true;
        }
      }
    }
  }

  if (generated)
  {
    ctx.m_config->m_generated = true;
  }

  //ndbout << proc << endl;
  return ok;
}
NdbOut&
operator<<(NdbOut& out, const atrt_process& proc)
{
  out << "[ atrt_process: ";
  switch(proc.m_type){
  case atrt_process::AP_NDB_MGMD:
    out << "ndb_mgmd";
    break;
  case atrt_process::AP_NDBD:
    out << "ndbd";
    break;
  case atrt_process::AP_MYSQLD:
    out << "mysqld";
    break;
  case atrt_process::AP_NDB_API:
    out << "ndbapi";
    break;
  case atrt_process::AP_CLIENT:
    out << "client";
    break;
  default:
    out << "<unknown: " << (int)proc.m_type << " >";
  }

  out << " cluster: " << proc.m_cluster->m_name.c_str()
      << " host: " << proc.m_host->m_hostname.c_str()
      << endl << " cwd: " << proc.m_proc.m_cwd.c_str()
      << endl << " path: " << proc.m_proc.m_path.c_str()
      << endl << " args: " << proc.m_proc.m_args.c_str()
      << endl << " env: " << proc.m_proc.m_env.c_str() << endl;

  proc.m_options.m_generated.print(stdout, "generated: ");

  out << " ]";

#if 0
  proc.m_index = 0; //idx;
  proc.m_host = host_ptr;
  proc.m_cluster = cluster;
  proc.m_proc.m_id = -1;
  proc.m_proc.m_type = "temporary";
  proc.m_proc.m_owner = "atrt";
  proc.m_proc.m_group = cluster->m_name.c_str();
  proc.m_proc.m_cwd.assign(dir).append("/atrt/").append(cluster->m_dir);
  proc.m_proc.m_stdout = "log.out";
  proc.m_proc.m_stderr = "2>&1";
  proc.m_proc.m_runas = proc.m_host->m_user;
  proc.m_proc.m_ulimit = "c:unlimited";
  proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir);
  proc.m_proc.m_shutdown_options = "";
#endif

  return out;
}
storage/ndb/test/run-test/test-tests.txt
0 → 100644
View file @
294df8e7
max-time: 600
cmd: testBasic
args: -n PkRead T1
max-time: 1800
cmd: testMgm
args: -n SingleUserMode T1
#
#
# SYSTEM RESTARTS
#
max-time: 1500
cmd: testSystemRestart
args: -n SR3 T6
max-time: 1500
cmd: testSystemRestart
args: -n SR4 T6
max-time: 600
cmd: testBasic
args: -n PkRead T1
storage/ndb/test/tools/Makefile.am
View file @
294df8e7
...
...
@@ -38,6 +38,7 @@ include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am

ndb_cpcc_LDADD = $(LDADD)
ndb_cpcc_LDFLAGS = -static

# Don't update the files from bitkeeper
% :: SCCS/s.%
...
...