Commit 0652902a authored by monty@work.mysql.com

merge

parents 23c049e3 a2e9e16f
......@@ -314,11 +314,15 @@ sql-bench/Results-linux/ATIS-mysql_bdb-Linux_2.2.14_my_SMP_i686
sql-bench/bench-count-distinct
sql-bench/bench-init.pl
sql-bench/compare-results
sql-bench/compare-results-all
sql-bench/copy-db
sql-bench/crash-me
sql-bench/gif/*
sql-bench/graph-compare-results
sql-bench/output/*
sql-bench/run-all-tests
sql-bench/server-cfg
sql-bench/template.html
sql-bench/test-ATIS
sql-bench/test-alter-table
sql-bench/test-big-tables
......
......@@ -3,6 +3,7 @@
#shift
TO=dev-public@mysql.com
FROM=$USER@mysql.com
INTERNALS=internals@lists.mysql.com
LIMIT=10000
if [ "$REAL_EMAIL" = "" ]
......@@ -24,6 +25,23 @@ From: $FROM
To: $TO
Subject: bk commit - 4.0 tree
EOF
bk changes -v -r+
bk cset -r+ -d
) | head -n $LIMIT | /usr/sbin/sendmail -t
echo "Notifying internals list at $INTERNALS"
(
cat <<EOF
List-ID: <bk.mysql>
From: $FROM
To: $INTERNALS
Subject: bk commit into 3.23 tree
Below is a list of the changes that have just been pushed into the main
3.23 repository. For information on how to access the repository,
see http://www.mysql.com/doc/I/n/Installing_source_tree.html
EOF
bk changes -v -r+
bk cset -r+ -d
......
......@@ -4,10 +4,10 @@ use Getopt::Long;
$opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env="";
$opt_dbd_options=$opt_perl_options=$opt_suffix="";
$opt_tmp=$version_suffix="";
$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0;
$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=0;
$opt_innodb=$opt_bdb=0;
GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage();
GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","enable-shared","no-crash-me","no-strip") || usage();
usage() if ($opt_help || $opt_Information);
usage() if (!$opt_distribution);
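# Example invocation using the new switches (a sketch; the distribution
# file name is illustrative, and the script is the one known as
# Do-compile in the MySQL source tree):
#
#   ./Do-compile --distribution=mysql-3.23.39.tar.gz --enable-shared \
#                --no-strip --no-crash-me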
......@@ -19,7 +19,7 @@ if ($opt_innodb || $opt_bdb)
chomp($host=`hostname`);
$full_host_name=$host;
print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
info("Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n");
$connect_option= ($opt_tcpip ? "--host=$host" : "");
$host =~ /^([^.-]*)/;
$host=$1 . $opt_suffix;
......@@ -119,7 +119,10 @@ if ($opt_stage <= 1)
{
$opt_config_options.=" --with-client-ldflags=-all-static";
}
if (!$opt_enable_shared)
{
$opt_config_options.= " --disable-shared"; # Default for binary versions
}
if ($opt_bdb)
{
$opt_config_options.= " --with-berkeley-db"
......@@ -146,10 +149,13 @@ if ($opt_stage <= 2)
#
if ($opt_stage <= 3)
{
my ($flags);
log_system("rm -fr mysql-3* mysql-4* $pwd/$host/*.tar.gz");
log_system("nm -n sql/mysqld | gzip -9 -v 2>&1 > sql/mysqld.sym.gz | cat");
log_system("strip sql/mysqld extra/comp_err client/mysql sql/mysqld client/mysqlshow extra/replace isam/isamchk client/mysqladmin client/mysqldump extra/perror");
check_system("scripts/make_binary_distribution $opt_tmp $opt_suffix",".tar.gz created");
$flags="";
$flags.="--no-strip" if ($opt_no_strip);
check_system("scripts/make_binary_distribution --tmp=$opt_tmp --suffix=$opt_suffix $flags",".tar.gz created");
safe_system("mv mysql*.tar.gz $pwd/$host");
safe_system("cp client/mysqladmin $pwd/$host/bin");
safe_system("$make clean") if ($opt_with_small_disk);
......@@ -174,6 +180,7 @@ if ($opt_stage <= 4 && !$opt_no_test)
$tar_file =~ /(mysql-[^\/]*)\.tar/;
$ver=$1;
$test_dir="$pwd/$host/test/$ver";
$ENV{"LD_LIBRARY_PATH"}= "$testdir/lib:" . $ENV{"LD_LIBRARY_PATH"};
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
......@@ -237,7 +244,7 @@ if ($opt_stage <= 7 && $opt_perl_files && !$opt_no_perl && !$opt_no_test)
}
if ($opt_stage <= 8 && !$opt_no_test)
if ($opt_stage <= 8 && !$opt_no_test && !$opt_no_crash_me)
{
safe_cd("$test_dir/sql-bench");
log_system("rm -f limits/mysql.cfg");
......
......@@ -369,7 +369,7 @@ The MySQL Access Privilege System
* Request access:: Access control, stage 2: Request verification
* Privilege changes:: When privilege changes take effect
* Default privileges:: Setting up the initial @strong{MySQL} privileges
* Adding users:: Adding new user privileges to @strong{MySQL}
* Adding users:: Adding new users to @strong{MySQL}
* Passwords:: How to set up passwords
* Access denied:: Causes of @code{Access denied} errors
......@@ -688,7 +688,7 @@ System/Compile Time and Startup Parameter Tuning
* Compile and link options:: How compiling and linking affects the speed of MySQL
* Disk issues:: Disk issues
* Symbolic links::
* Symbolic links:: Using Symbolic Links
* Server parameters:: Tuning server parameters
* Table cache:: How MySQL opens and closes tables
* Creating many tables:: Drawbacks of creating large numbers of tables in the same database
......@@ -954,6 +954,17 @@ How MySQL Compares to @code{mSQL}
* Protocol differences:: How @code{mSQL} and @strong{MySQL} client/server communications protocols differ
* Syntax differences:: How @code{mSQL} 2.0 SQL syntax differs from @strong{MySQL}
How MySQL Compares to PostgreSQL
* MySQL-PostgreSQL goals::
* MySQL-PostgreSQL features::
* MySQL-PostgreSQL benchmarks::
MySQL and PostgreSQL development goals
* MySQL-PostgreSQL features::
* MySQL-PostgreSQL benchmarks::
MySQL Internals
* MySQL threads:: MySQL threads
......@@ -5249,7 +5260,7 @@ clients can connect to both @strong{MySQL} versions.
The extended @strong{MySQL} binary distribution is marked with the
@code{-max} suffix and is configured with the same options as
@code{mysqld-max}. @xref{mysqld-max, @code{mysqld-max}}.
@code{mysqld-max}. @xref{mysqld-max, , @code{mysqld-max}}.
If you want to use the @code{MySQL-Max} RPM, you must first
install the standard @code{MySQL} RPM.
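For example (a sketch; the exact file names depend on the version you
downloaded):

@example
shell> rpm -i MySQL-VERSION.i386.rpm
shell> rpm -i MySQL-Max-VERSION.i386.rpm
@end example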
......@@ -5590,7 +5601,7 @@ indicates the type of operating system for which the distribution is intended
@item
If you see a binary distribution marked with the @code{-max} prefix, this
means that the binary has support for transaction-safe tables and other
features. @xref{mysqld-max, @code{mysqld-max}}. Note that all binaries
features. @xref{mysqld-max, , @code{mysqld-max}}. Note that all binaries
are built from the same @strong{MySQL} source distribution.
@item
......@@ -5714,7 +5725,7 @@ You can start the @strong{MySQL} server with the following command:
shell> bin/safe_mysqld --user=mysql &
@end example
@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{safe_mysqld, , @code{safe_mysqld}}.
@xref{Post-installation}.
......@@ -5786,7 +5797,7 @@ installation, you may want to make a copy of your previously installed
@strong{MySQL} startup file if you made any changes to it, so you don't lose
your changes.)
After installing the RPM file(s), the @file{mysqld} daemon should be running
After installing the RPM file(s), the @code{mysqld} daemon should be running
and you should now be able to start using @strong{MySQL}.
@xref{Post-installation}.
......@@ -5822,7 +5833,7 @@ files.
The following sections indicate some of the issues that have been observed
on particular systems when installing @strong{MySQL} from a binary
distribution.
distribution or from RPM files.
@cindex binary distributions, on Linux
@cindex Linux, binary distribution
......@@ -7463,6 +7474,9 @@ Configure @strong{MySQL} with the @code{--with-named-z-libs=no} option.
@node Solaris x86, SunOS, Solaris 2.7, Source install system issues
@subsection Solaris x86 Notes
On Solaris 2.8 on x86, @strong{mysqld} will core dump if you run
@code{strip} on it.
If you are using @code{gcc} or @code{egcs} on Solaris x86 and you
experience problems with core dumps under load, you should use the
following @code{configure} command:
......@@ -7521,6 +7535,11 @@ Linux version that doesn't have @code{glibc2}, you must install
LinuxThreads before trying to compile @strong{MySQL}. You can get
LinuxThreads at @uref{http://www.mysql.com/Downloads/Linux}.
@strong{NOTE:} We have seen some strange problems with Linux 2.2.14 and
@strong{MySQL} on SMP systems. If you have an SMP system, we recommend
that you upgrade to Linux 2.4 as soon as possible! Your system will be
faster and more stable by doing this!
Note that @code{glibc} versions before and including Version 2.1.1 have
a fatal bug in @code{pthread_mutex_timedwait} handling, which is used
when you do @code{INSERT DELAYED}. We recommend that you not use
......@@ -7673,13 +7692,13 @@ To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV
signal, you can start @code{mysqld} with the @code{--core-file} option. Note
that you also probably need to raise the @code{core file size} by adding
@code{ulimit -c 1000000} to @code{safe_mysqld} or starting @code{safe_mysqld}
with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
with @code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}.
To get a core dump on Linux if @code{mysqld} dies with a SIGSEGV signal, you can
start @code{mysqld} with the @code{--core-file} option. Note that you also probably
need to raise the @code{core file size} by adding @code{ulimit -c 1000000} to
@code{safe_mysqld} or starting @code{safe_mysqld} with
@code{--core-file-sizes=1000000}. @xref{safe_mysqld, @code{safe_mysqld}}.
@code{--core-file-sizes=1000000}. @xref{safe_mysqld, , @code{safe_mysqld}}.
If you are linking your own @strong{MySQL} client and get the error:
......@@ -8007,7 +8026,7 @@ shell> nohup mysqld [options] &
@code{nohup} causes the command following it to ignore any @code{SIGHUP}
signal sent from the terminal. Alternatively, start the server by running
@code{safe_mysqld}, which invokes @code{mysqld} using @code{nohup} for you.
@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{safe_mysqld, , @code{safe_mysqld}}.
If you get a problem when compiling @file{mysys/get_opt.c}, just remove
the @code{#define _NO_PROTO} line from the start of that file!
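For example (a sketch):

@example
shell> grep -v '#define _NO_PROTO' mysys/get_opt.c > get_opt.c.new
shell> mv get_opt.c.new mysys/get_opt.c
@end example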
......@@ -8264,7 +8283,7 @@ FreeBSD is also known to have a very low default file handle limit.
safe_mysqld or raise the limits for the @code{mysqld} user in /etc/login.conf
(and rebuild it with cap_mkdb /etc/login.conf). Also be sure you set the
appropriate class for this user in the password file if you are not
using the default (use: chpass mysqld-user-name). @xref{safe_mysqld,
using the default (use: chpass mysqld-user-name). @xref{safe_mysqld, ,
@code{safe_mysqld}}.
If you get problems with the current date in @strong{MySQL}, setting the
......@@ -8717,10 +8736,10 @@ the DCE libraries while you compile @code{gcc} 2.95!
@node HP-UX 11.x, Mac OS X, HP-UX 10.20, Source install system issues
@subsection HP-UX Version 11.x Notes
For HPUX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
For HP-UX Version 11.x we recommend @strong{MySQL} Version 3.23.15 or later.
Because of some critical bugs in the standard HPUX libraries, one should
install the following patches before trying to run @strong{MySQL} on HPUX 11.0:
Because of some critical bugs in the standard HP-UX libraries, you should
install the following patches before trying to run @strong{MySQL} on HP-UX 11.0:
@example
PHKL_22840 Streams cumulative
......@@ -8730,7 +8749,7 @@ PHNE_22397 ARPA cumulative
This will solve the problem of getting @code{EWOULDBLOCK} from @code{recv()}
and @code{EBADF} from @code{accept()} in threaded applications.
If you are using @code{gcc} 2.95.1 on an unpatched HPUX 11.x system,
If you are using @code{gcc} 2.95.1 on an unpatched HP-UX 11.x system,
you will get the error:
@example
......@@ -8769,8 +8788,8 @@ After this, the following configure line should work:
CFLAGS="-fomit-frame-pointer -O3 -fpic" CXX=gcc CXXFLAGS="-felide-constructors -fno-exceptions -fno-rtti -O3" ./configure --prefix=/usr/local/mysql --disable-shared
@end example
Here is some information that a HPUX Version 11.x user sent us about compiling
@strong{MySQL} with HPUX:x compiler:
Here is some information that an HP-UX Version 11.x user sent us about
compiling @strong{MySQL} with the HP-UX 11.x compiler:
@example
Environment:
......@@ -8880,8 +8899,8 @@ in a while.
@section Windows Notes
This section describes installation and use of @strong{MySQL} on Windows.
This is also described in the @file{README} file that comes with the
@strong{MySQL} Windows distribution.
This information is also provided in the @file{README} file that comes
with the @strong{MySQL} Windows distribution.
@menu
* Windows installation:: Installing @strong{MySQL} on Windows
......@@ -8897,6 +8916,10 @@ This is also described in the @file{README} file that comes with the
@node Windows installation, Win95 start, Windows, Windows
@subsection Installing MySQL on Windows
The following instructions apply to precompiled binary distributions.
If you download a source distribution, you will have to compile and install
it yourself.
If you don't have a copy of the @strong{MySQL} distribution, you should
first download one from @uref{http://www.mysql.com/downloads/mysql-3.23.html}.
......@@ -8909,23 +8932,30 @@ To install either distribution, unzip it in some empty directory and run the
@code{Setup.exe} program.
By default, @strong{MySQL}-Windows is configured to be installed in
@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere, install it
in @file{C:\mysql} first, then move the installation to where you want it. If
you do move @strong{MySQL}, you must tell @code{mysqld} where everything is by
supplying options to @code{mysqld}. Use @code{C:\mysql\bin\mysqld --help} to
display all options! For example, if you have moved the @strong{MySQL}
distribution to @file{D:\programs\mysql}, you must start @code{mysqld} with:
@code{D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql}
@file{C:\mysql}. If you want to install @strong{MySQL} elsewhere,
install it in @file{C:\mysql} first, then move the installation to
where you want it. If you do move @strong{MySQL}, you must indicate
where everything is located by supplying a @code{--basedir} option when
you start the server. For example, if you have moved the @strong{MySQL}
distribution to @file{D:\programs\mysql}, you must start @code{mysqld}
like this:
@example
C:\> D:\programs\mysql\bin\mysqld --basedir D:\programs\mysql
@end example
Use @code{mysqld --help} to display all the options that @code{mysqld}
understands!
With all newer @strong{MySQL} versions, you can also create a
@file{C:\my.cnf} file that holds any default options for the
@strong{MySQL} server. Copy the file @file{\mysql\my-xxxxx.cnf} to
@file{C:\my.cnf} and edit it to suit your setup. Note that you should
specify all paths with @samp{/} instead of @samp{\}. If you use
@samp{\}, you need to specify it twice, as @samp{\} is the escape
@samp{\}, you need to specify it twice, because @samp{\} is the escape
character in @strong{MySQL}. @xref{Option files}.
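For example, a minimal @file{C:\my.cnf} for a server installed under
@file{D:\programs\mysql} might look like this (the path values are
illustrative):

@example
[mysqld]
basedir=D:/programs/mysql
datadir=D:/programs/mysql/data
@end example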
Starting from @strong{MySQL} 3.23.38, the Windows distribution includes
Starting with @strong{MySQL} 3.23.38, the Windows distribution includes
both the normal and the @strong{MySQL-Max} binaries. The main benefit
of using the normal @code{mysqld.exe} binary is that it's a little
faster and uses less resources.
......@@ -8982,12 +9012,16 @@ You can kill the @strong{MySQL} server by executing:
C:\> C:\mysql\bin\mysqladmin -u root shutdown
@end example
Note that Win95 and Win98 don't support creation of named pipes. On
Win95 and Win98, you can only use named pipes to connect to a remote
@strong{MySQL} running on an NT server.
Note that Win95 and Win98 don't support creation of named pipes.
On Win95 and Win98, you can only use named pipes to connect to a
remote @strong{MySQL} server running on a Windows NT server host.
(The @strong{MySQL} server must also support named pipes, of
course. For example, using @code{mysqld-opt} under NT will not allow
named pipe connections. You should use either @code{mysqld-nt} or
@code{mysqld-max-nt}.)
If @code{mysqld} doesn't start, please check whether or not the
@file{\mysql\mysql.err} file contains any reason for this. You can also
@file{\mysql\data\mysql.err} file contains any reason for this. You can also
try to start the server with @code{mysqld --standalone}; in this case, you may
get some useful information on the screen that may help solve the problem.
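For example (assuming the default installation location):

@example
C:\> C:\mysql\bin\mysqld --standalone
@end example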
......@@ -9041,9 +9075,9 @@ with the default service options. If you have stopped @code{mysqld-nt}, you
have to start it with @code{NET START mysql}.
The service is installed with the name @code{MySQL}. Once installed, it must
be started using the Services Control Manager (SCM) Utility (found in Control
Panel) or by using the @code{NET START MySQL} command. If any options are
desired, they must be specified as ``Startup parameters'' in the SCM utility
be started using the Services Control Manager (SCM) Utility found in the
Control Panel, or by using the @code{NET START MySQL} command. If any options
are desired, they must be specified as ``Startup parameters'' in the SCM utility
before you start the @strong{MySQL} service. Once running, @code{mysqld-nt}
can be stopped using @code{mysqladmin}, from the SCM utility, or by using
the command @code{NET STOP MySQL}. If you use SCM to stop @code{mysqld-nt},
......@@ -9161,14 +9195,12 @@ server, you can do so using this command:
C:\> mysqladmin --user=root --password=your_password shutdown
@end example
If you are using the old shareware version of @strong{MySQL} Version 3.21
under Windows, the above command will fail with an error: @code{parse error
near 'SET OPTION password'}. This is because the old shareware version,
which is based on @strong{MySQL} Version 3.21, doesn't have the
@code{SET PASSWORD} command. The fix is in this case to upgrade to
the Version 3.22 shareware.
If you are using the old shareware version of @strong{MySQL} Version
3.21 under Windows, the above command will fail with an error:
@code{parse error near 'SET OPTION password'}. The fix in this case is to
upgrade to the current @strong{MySQL} version, which is freely available.
With the newer @strong{MySQL} versions you can easily add new users
With the current @strong{MySQL} versions you can easily add new users
and change privileges with @code{GRANT} and @code{REVOKE} commands.
@xref{GRANT}.
......@@ -9183,7 +9215,7 @@ Here is a note about how to connect to get a secure connection to remote
@itemize @bullet
@item
Install an SSH client on your Windows machine --- As a user, the best non-free
Install an SSH client on your Windows machine. As a user, the best non-free
one I've found is @code{SecureCRT} from @uref{http://www.vandyke.com/}.
Another option is @code{f-secure} from @uref{http://www.f-secure.com/}. You
can also find some free ones on @strong{Google} at
......@@ -9237,9 +9269,23 @@ Note that the symbolic link will be used only if the directory
For example, if the @strong{MySQL} data directory is @file{C:\mysql\data}
and you want to have database @code{foo} located at @file{D:\data\foo}, you
should create the file @file{C:\mysql\data\foo.sym} that contains the
text @code{D:\data\foo}. After that, all tables created in the database
text @code{D:\data\foo\}. After that, all tables created in the database
@code{foo} will be created in @file{D:\data\foo}.
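For example, you could create the symlink file from the command prompt
like this (a sketch, using the paths from the example above; note that
there is no space before the @samp{>}):

@example
C:\> echo D:\data\foo\> C:\mysql\data\foo.sym
@end example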
Note that because of the speed penalty incurred when opening every table,
we have not enabled this by default even if you have compiled
@strong{MySQL} with support for it. To enable symlinks, you should put
the following entry in your @code{my.cnf} or @code{my.ini} file:
@example
[mysqld]
use-symbolic-links
@end example
In @strong{MySQL} 4.0 we will enable symlinks by default. Then you
should instead use the @code{skip-symlink} option if you want to
disable this.
@cindex compiling, on Windows
@cindex Windows, compiling on
@node Windows compiling, Windows vs Unix, Windows symbolic links, Windows
......@@ -9681,7 +9727,7 @@ mysqld: Can't find file: 'host.frm'
The above may also happen with a binary @strong{MySQL} distribution if you
don't start @strong{MySQL} by executing exactly @code{./bin/safe_mysqld}!
@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{safe_mysqld, , @code{safe_mysqld}}.
You might need to run @code{mysql_install_db} as @code{root}. However,
if you prefer, you can run the @strong{MySQL} server as an unprivileged
......@@ -9982,7 +10028,7 @@ system startup and shutdown, and is described more fully in
@item
By invoking @code{safe_mysqld}, which tries to determine the proper options
for @code{mysqld} and then runs it with those options. @xref{safe_mysqld,
for @code{mysqld} and then runs it with those options. @xref{safe_mysqld, ,
@code{safe_mysqld}}.
@item
......@@ -10039,7 +10085,7 @@ correctly, check the log file to see if you can find out why. Log files
are located in the data directory (typically
@file{/usr/local/mysql/data} for a binary distribution,
@file{/usr/local/var} for a source distribution,
@file{\mysql\mysql.err} on Windows.) Look in the data directory for
@file{\mysql\data\mysql.err} on Windows.) Look in the data directory for
files with names of the form @file{host_name.err} and
@file{host_name.log} where @code{host_name} is the name of your server
host. Then check the last few lines of these files:
......@@ -10130,6 +10176,10 @@ options. @xref{InnoDB start}.
@cindex stopping, the server
@cindex server, starting and stopping
The @code{mysql.server} and @code{safe_mysqld} scripts can be used to start
the server automatically at system startup time. @code{mysql.server} can also
be used to stop the server.
The @code{mysql.server} script can be used to start or stop the server
by invoking it with @code{start} or @code{stop} arguments:
......@@ -10147,9 +10197,8 @@ the @strong{MySQL} installation directory, then invokes @code{safe_mysqld}.
You might need to edit @code{mysql.server} if you have a binary distribution
that you've installed in a non-standard location. Modify it to @code{cd}
into the proper directory before it runs @code{safe_mysqld}. If you want the
server to run as some specific user, you can change the
@code{mysql_daemon_user=root} line to use another user. You can also modify
@code{mysql.server} to pass other options to @code{safe_mysqld}.
server to run as some specific user, add an appropriate @code{user} line
to the @file{/etc/my.cnf} file, as shown later in this section.
@code{mysql.server stop} brings down the server by sending a signal to it.
You can take down the server manually by executing @code{mysqladmin shutdown}.
......@@ -10175,23 +10224,23 @@ this:
datadir=/usr/local/mysql/var
socket=/tmp/mysqld.sock
port=3306
user=mysql
[mysql.server]
user=mysql
basedir=/usr/local/mysql
@end example
The @code{mysql.server} script uses the following variables:
@code{user}, @code{datadir}, @code{basedir}, @code{bindir}, and
@code{pid-file}.
The @code{mysql.server} script understands the following options:
@code{datadir}, @code{basedir}, and @code{pid-file}.
The following table shows which option sections each of the startup script
uses:
The following table shows which option groups each of the startup scripts
reads from option files:
@multitable @columnfractions .20 .80
@item @strong{Script} @tab @strong{Option groups}
@item @code{mysqld} @tab @code{mysqld} and @code{server}
@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld} and @code{server}
@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld} and @code{server}
@item @code{mysql.server} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
@item @code{safe_mysqld} @tab @code{mysql.server}, @code{mysqld}, and @code{server}
@end multitable
@xref{Option files}.
......@@ -10232,7 +10281,7 @@ though.
@item --core-file
Write a core file if @code{mysqld} dies. For some systems you must also
specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld,
specify @code{--core-file-size} to @code{safe_mysqld}. @xref{safe_mysqld, ,
@code{safe_mysqld}}.
@item -h, --datadir=path
......@@ -10413,8 +10462,9 @@ recommended for systems where only local requests are allowed. @xref{DNS}.
Don't use new, possibly wrong routines. Implies @code{--skip-delay-key-write}.
This will also set default table type to @code{ISAM}. @xref{ISAM}.
@item --skip-symlinks
Don't delete or rename files that symlinks in the data directory points to.
@item --skip-symlink
Don't delete or rename files that a symlinked file in the data directory
points to.
@item --skip-safemalloc
If @strong{MySQL} is configured with @code{--with-debug=full}, all programs
......@@ -10580,7 +10630,7 @@ interactive-timeout
@tindex .my.cnf file
If you have a source distribution, you will find sample configuration
files named @file{my-xxxx.cnf} in the @file{support-files} directory.
If you have a binary distribution, look in the @file{DIR/share/mysql}
If you have a binary distribution, look in the @file{DIR/support-files}
directory, where @code{DIR} is the pathname to the @strong{MySQL}
installation directory (typically @file{/usr/local/mysql}). Currently
there are sample configuration files for small, medium, large, and very
......@@ -10657,7 +10707,7 @@ The above is the quick and dirty way that one commonly uses for testing.
The nice thing about this is that all connections you make in the above shell
will automatically be directed to the new running server!
If you need to do this more permanently, you should create an own option
If you need to do this more permanently, you should create an option
file for each server. @xref{Option files}. In your startup script that
is executed at boot time (mysql.server?) you should specify for both
servers:
......@@ -11926,7 +11976,7 @@ system. This section describes how it works.
* Request access:: Access control, stage 2: Request verification
* Privilege changes:: When privilege changes take effect
* Default privileges:: Setting up the initial @strong{MySQL} privileges
* Adding users:: Adding new user privileges to @strong{MySQL}
* Adding users:: Adding new users to @strong{MySQL}
* Passwords:: How to set up passwords
* Access denied:: Causes of @code{Access denied} errors
@end menu
......@@ -11955,9 +12005,10 @@ When running @strong{MySQL}, follow these guidelines whenever possible:
@itemize @bullet
@item
DON'T EVER GIVE ANYONE (EXCEPT THE @strong{MySQL} ROOT USER) ACCESS TO THE
mysql.user TABLE! The encrypted password is the real password in
@strong{MySQL}. If you know this for one user, you can easily log in as
him if you have access to his 'host'.
@code{user} TABLE IN THE @code{mysql} DATABASE! The encrypted password
is the real password in @strong{MySQL}. If you know the password listed in
the @code{user} table for a given user, you can easily log in as that
user if you have access to the host listed for that account.
@item
Learn the @strong{MySQL} access privilege system. The @code{GRANT} and
......@@ -11986,15 +12037,15 @@ computer becomes compromised, the intruder can take the full list of
passwords and use them. Instead use @code{MD5()} or another one-way
hashing function.
@item
Do not use passwords from dictionaries. There are special programs to
Do not choose passwords from dictionaries. There are special programs to
break them. Even passwords like ``xfish98'' are very bad. Much better is
``duag98'' which contains the same word ``fish'' but typed one key to the
left on a standard QWERTY keyboard. Another method is to use ``Mhall'' which
is taken from the first characters of each word in the sentence ``Mary had
a little lamb.'' This is easy to remember and type, but hard to guess for
someone who does not know it.
a little lamb.'' This is easy to remember and type, but difficult to guess
for someone who does not know it.
@item
Invest in a firewall. This protects from at least 50% of all types of
Invest in a firewall. This protects you from at least 50% of all types of
exploits in any software. Put @strong{MySQL} behind the firewall or in
a demilitarized zone (DMZ).
......@@ -12003,11 +12054,16 @@ Checklist:
@item
Try to scan your ports from the Internet using a tool such as
@code{nmap}. @strong{MySQL} uses port 3306 by default. This port should
be inaccessible from untrusted hosts. Another simple way to check whether or
not your @strong{MySQL} port is open is to type @code{telnet
server_host 3306} from some remote machine, where
@code{server_host} is the hostname of your @strong{MySQL}
server. If you get a connection and some garbage characters, the port is
be inaccessible from untrusted hosts. Another simple way to check whether
or not your @strong{MySQL} port is open is to try the following command
from some remote machine, where @code{server_host} is the hostname of
your @strong{MySQL} server:
@example
shell> telnet server_host 3306
@end example
If you get a connection and some garbage characters, the port is
open, and should be closed on your firewall or router, unless you really
have a good reason to keep it open. If @code{telnet} just hangs or the
connection is refused, everything is OK; the port is blocked.
......@@ -12063,11 +12119,13 @@ not give your applications any more access privileges than they need.
Users of PHP:
@itemize @bullet
@item Check out the @code{addslashes()} function.
As of PHP 4.0.3, a @code{mysql_escape_string()} function is available
that is based on the function of the same name in the @strong{MySQL} C API.
@end itemize
@item
Users of @strong{MySQL} C API:
@itemize @bullet
@item Check out the @code{mysql_escape()} API call.
@item Check out the @code{mysql_escape_string()} API call.
@end itemize
@item
Users of @strong{MySQL}++:
......@@ -12079,6 +12137,11 @@ Users of Perl DBI:
@itemize @bullet
@item Check out the @code{quote()} method or use placeholders.
@end itemize
@item
Users of Java JDBC:
@itemize @bullet
@item Use a @code{PreparedStatement} object and placeholders.
@end itemize
@end itemize
@item
......@@ -12114,15 +12177,15 @@ connection, however the encryption algorithm is not very strong, and
with some effort a clever attacker can crack the password if he is able
to sniff the traffic between the client and the server. If the
connection between the client and the server goes through an untrusted
network, you should use an @strong{SSH} tunnel to encrypt the
network, you should use an SSH tunnel to encrypt the
communication.
All other information is transferred as text that can be read by anyone
who is able to watch the connection. If you are concerned about this,
you can use the compressed protocol (in @strong{MySQL} Version 3.22 and above)
to make things much harder. To make things even more secure you should use
@code{ssh}. You can find an open source ssh client at
@uref{http://www.openssh.org}, and a commercial ssh client at
@code{ssh}. You can find an open source @code{ssh} client at
@uref{http://www.openssh.org}, and a commercial @code{ssh} client at
@uref{http://www.ssh.com}. With this, you can get an encrypted TCP/IP
connection between a @strong{MySQL} server and a @strong{MySQL} client.
......@@ -12147,41 +12210,43 @@ mysql> FLUSH PRIVILEGES;
@end example
@item
Don't run the @strong{MySQL} daemon as the Unix @code{root} user.
It is very dangerous as any user with @code{FILE} privileges will be able to
create files
as @code{root} (for example, @code{~root/.bashrc}). To prevent this
@code{mysqld} will refuse to run as @code{root} unless it is specified
directly via @code{--user=root} option.
Don't run the @strong{MySQL} daemon as the Unix @code{root} user. This is
very dangerous, because any user with @code{FILE} privileges will be able
to create files as @code{root} (for example, @code{~root/.bashrc}). To
prevent this, @code{mysqld} will refuse to run as @code{root} unless it
is specified directly using a @code{--user=root} option.
@code{mysqld} can be run as any user instead. You can also create a new
Unix user @code{mysql} to make everything even more secure. If you run
@code{mysqld} as another Unix user, you don't need to change the
@code{root} user name in the @code{user} table, because @strong{MySQL}
user names have nothing to do with Unix user names. You can edit the
@code{mysql.server} script to start @code{mysqld} as another Unix user.
Normally this is done with the @code{su} command. For more details, see
@ref{Changing MySQL user, , Changing @strong{MySQL} user}.
@code{mysqld} can be run as an ordinary unprivileged user instead.
You can also create a new Unix user @code{mysql} to make everything
even more secure. If you run @code{mysqld} as another Unix user,
you don't need to change the @code{root} user name in the @code{user}
table, because @strong{MySQL} user names have nothing to do with Unix
user names. To start @code{mysqld} as another Unix user, add a @code{user}
line that specifies the user name to the @code{[mysqld]} group of the
@file{/etc/my.cnf} option file or the @file{my.cnf} option file in the
server's data directory. For example:
@example
[mysqld]
user=mysql
@end example
This will cause the server to start as the designated user whether you
start it manually or by using @code{safe_mysqld} or @code{mysql.server}.
For more details, see @ref{Changing MySQL user, , Changing @strong{MySQL}
user}.
@item
Don't support symlinks to tables. (This can be disabled with the
@code{--skip-symlinks} option.) This is especially important if you run
@code{--skip-symlink} option.) This is especially important if you run
@code{mysqld} as root, because anyone that has write access to the mysqld data
directories could then delete any file in the system!
@xref{Symbolic links to tables}.
@item
If you put a password for the Unix @code{root} user in the @code{mysql.server}
script, make sure this script is readable only by @code{root}.
@item
Check that the Unix user that @code{mysqld} runs as is the only user with
read/write privileges in the database directories.
@item
On Unix platforms, do not run @code{mysqld} as root unless you really
need to. Consider creating a user named @code{mysql} for that purpose.
@item
Don't give the @strong{process} privilege to all users. The output of
@code{mysqladmin processlist} shows the text of the currently executing
......@@ -12282,7 +12347,6 @@ DATA INFILE} and administrative operations.
@cindex user names, and passwords
@cindex passwords, for users
There are several distinctions between the way user names and passwords are
used by @strong{MySQL} and the way they are used by Unix or Windows:
......@@ -12316,6 +12380,42 @@ knowing your 'scrambled' password is enough to be able to connect to
the @strong{MySQL} server!
@end itemize
@strong{MySQL} users and their privileges are normally created with the
@code{GRANT} command. @xref{GRANT}.
When you log in to a @strong{MySQL} server with a command-line client, you
should specify the password with @code{--password=your-password}.
@xref{Connecting}.
@example
mysql --user=monty --password=guess database_name
@end example
If you want the client to prompt for a password, you should use
@code{--password} without any argument:
@example
mysql --user=monty --password database_name
@end example
or the short form:
@example
mysql -u monty -p database_name
@end example
Note that in the last example the password is @strong{NOT} 'database_name'.
If you want to use the @code{-p} option to supply a password, you should do so like this:
@example
mysql -u monty -pguess database_name
@end example
On some systems, the library call that @strong{MySQL} uses to prompt for a
password automatically cuts the password to 8 characters. Internally,
@strong{MySQL} doesn't have any limit on the length of the password.
@node Connecting, Password security, User names, Privilege system
@section Connecting to the MySQL Server
@cindex connecting, to the server
......@@ -13375,12 +13475,15 @@ running @code{mysql_install_db}.
@findex GRANT statement
@findex statements, GRANT
@node Adding users, Passwords, Default privileges, Privilege system
@section Adding New User Privileges to MySQL
@section Adding New Users to MySQL
You can add users two different ways: by using @code{GRANT} statements
or by manipulating the @strong{MySQL} grant tables directly. The
preferred method is to use @code{GRANT} statements, because they are
more concise and less error-prone.
more concise and less error-prone. @xref{GRANT}.
There are also a lot of contributed programs, such as @code{phpmyadmin}, that
can be used to create and administer users. @xref{Contrib}.
The examples below show how to use the @code{mysql} client to set up new
users. These examples assume that privileges are set up according to the
......@@ -13491,6 +13594,11 @@ mysql> GRANT SELECT,INSERT,UPDATE,DELETE,CREATE,DROP
IDENTIFIED BY 'stupid';
@end example
The reason that we issue two grant statements for the user 'custom' is that
we want to give the user access to @strong{MySQL} both from the local
machine over Unix sockets and from the remote machine 'whitehouse.gov'
over TCP/IP.
To set up the user's privileges by modifying the grant tables directly,
run these commands (note the @code{FLUSH PRIVILEGES} at the end):
......@@ -19164,7 +19272,7 @@ detect duplicated @code{UNIQUE} keys.
By using @code{DATA DIRECTORY="directory"} or @code{INDEX
DIRECTORY="directory"} you can specify where the table handler should
put its table and index files. This only works for @code{MyISAM} tables
in @code{MySQL} 4.0, when you are not using the @code{--skip-symlinks}
in @code{MySQL} 4.0, when you are not using the @code{--skip-symlink}
option. @xref{Symbolic links to tables}.
@end itemize
......@@ -19486,6 +19594,11 @@ sequence number by executing @code{SET INSERT_ID=#} before
@code{ALTER TABLE} or using the @code{AUTO_INCREMENT = #} table option.
@xref{SET OPTION}.
With MyISAM tables, if you don't change the @code{AUTO_INCREMENT}
column, the sequence number will not be affected. If you drop an
@code{AUTO_INCREMENT} column and then add another @code{AUTO_INCREMENT}
column, the numbers will start from 1 again.
@xref{ALTER TABLE problems}.
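A minimal illustration of this behavior (the table and column names are
hypothetical):

@example
mysql> CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
    ->                 name CHAR(10));
mysql> INSERT INTO t (name) VALUES ('a'),('b'),('c');  # id gets 1,2,3
mysql> ALTER TABLE t DROP id;               # drop the AUTO_INCREMENT column
mysql> ALTER TABLE t ADD id INT NOT NULL AUTO_INCREMENT PRIMARY KEY;
mysql> SELECT id, name FROM t;              # id is numbered 1,2,3 again
@end example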
@findex RENAME TABLE
......@@ -23030,8 +23143,9 @@ REVOKE priv_type [(column_list)] [, priv_type [(column_list)] ...]
@code{GRANT} is implemented in @strong{MySQL} Version 3.22.11 or later. For
earlier @strong{MySQL} versions, the @code{GRANT} statement does nothing.
The @code{GRANT} and @code{REVOKE} commands allow system administrators to
grant and revoke rights to @strong{MySQL} users at four privilege levels:
The @code{GRANT} and @code{REVOKE} commands allow system administrators
to create users and grant and revoke rights to @strong{MySQL} users at
four privilege levels:
@table @strong
@item Global level
......@@ -23051,6 +23165,7 @@ Column privileges apply to single columns in a given table. These privileges are
stored in the @code{mysql.columns_priv} table.
@end table
If you grant rights to a user that doesn't exist, that user is created.
For examples of how @code{GRANT} works, see @ref{Adding users}.
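For example, granting @code{USAGE} creates the account if it does not
already exist, without giving it any real privileges (the user and host
names here are only illustrative):

@example
mysql> GRANT USAGE ON *.* TO dummy@@localhost;
@end example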
For the @code{GRANT} and @code{REVOKE} statements, @code{priv_type} may be
......@@ -24434,7 +24549,7 @@ this. @xref{Table handler support}.
If you have downloaded a binary version of @strong{MySQL} that includes
support for BerkeleyDB, simply follow the instructions for installing a
binary version of @strong{MySQL}.
@xref{Installing binary}. @xref{mysqld-max, @code{mysqld-max}}.
@xref{Installing binary}. @xref{mysqld-max, , @code{mysqld-max}}.
To compile @strong{MySQL} with Berkeley DB support, download @strong{MySQL}
Version 3.23.34 or newer and configure @code{MySQL} with the
......@@ -25546,7 +25661,7 @@ binary.
If you have downloaded a binary version of @strong{MySQL} that includes
support for InnoDB (mysqld-max), simply follow the instructions for
installing a binary version of @strong{MySQL}. @xref{Installing binary}.
@xref{mysqld-max, @code{mysqld-max}}.
@xref{mysqld-max, , @code{mysqld-max}}.
To compile @strong{MySQL} with InnoDB support, download MySQL-3.23.37 or newer
and configure @code{MySQL} with the @code{--with-innodb} option.
......@@ -25717,7 +25832,7 @@ in its own lock table and rolls back the transaction. If you use
than InnoDB in the same transaction, then a deadlock may arise which
InnoDB cannot notice. In cases like this the timeout is useful to
resolve the situation.
@item @code{innodb_unix_file_flush_method} @tab
@item @code{innodb_flush_method} @tab
(Available from 3.23.39 up.)
The default value for this is @code{fdatasync}.
Another option is @code{O_DSYNC}.
......@@ -26325,7 +26440,7 @@ In some versions of Linux and Unix, flushing files to disk with the Unix
@code{fdatasync} and other similar methods is surprisingly slow.
The default method InnoDB uses is the @code{fdatasync} function.
If you are not satisfied with the database write performance, you may
try setting @code{innodb_unix_file_flush_method} in @file{my.cnf}
try setting @code{innodb_flush_method} in @file{my.cnf}
to @code{O_DSYNC}, though @code{O_DSYNC} seems to be slower on most systems.
You can also try setting it to @code{littlesync}, which means that
InnoDB does not call the file flush for every write it does to a
......@@ -26797,7 +26912,7 @@ Contact information of Innobase Oy, producer of the InnoDB engine:
@example
Website: www.innodb.com
Heikki.Tuuri@@innobase.inet.fi
Heikki.Tuuri@@innodb.com
phone: 358-9-6969 3250 (office) 358-40-5617367 (mobile)
InnoDB Oy Inc.
World Trade Center Helsinki
......@@ -31190,12 +31305,12 @@ If you use @code{ALTER TABLE RENAME} to move a table to another database,
then the table will be moved to the other database directory and the old
symlinks and the files they pointed to will be deleted.
@item
If you are not using symlinks you should use the @code{--skip-symlinks}
If you are not using symlinks you should use the @code{--skip-symlink}
option to @code{mysqld} to ensure that no one can drop or rename a file
outside of the @code{mysqld} data directory.
@end itemize
Things that are not yet fully supported:
Things that are not yet supported:
@cindex TODO, symlinks
@itemize @bullet
......@@ -33189,7 +33304,7 @@ with the @code{-max} prefix. This makes it very easy to test out a
another @code{mysqld} binary in an existing installation. Just
run @code{configure} with the options you want and then install the
new @code{mysqld} binary as @code{mysqld-max} in the same directory
where your old @code{mysqld} binary is. @xref{safe_mysqld, @code{safe_mysqld}}.
where your old @code{mysqld} binary is. @xref{safe_mysqld, , @code{safe_mysqld}}.
The @code{mysqld-max} RPM uses the above-mentioned @code{safe_mysqld}
feature. It just installs the @code{mysqld-max} executable and
......@@ -33202,7 +33317,7 @@ binaries includes:
@multitable @columnfractions .4 .3 .3
@item @strong{System} @tab @strong{BDB} @tab @strong{InnoDB}
@item AIX 4.3 @tab N @tab Y
@item HPUX 11.0 @tab N @tab Y
@item HP-UX 11.0 @tab N @tab Y
@item Linux-Alpha @tab N @tab Y
@item Linux-Intel @tab Y @tab Y
@item Linux-Ia64 @tab N @tab Y
MY_PWD=`pwd`
# Check if we are starting this relative (for the binary release)
if test -d /data/mysql -a -f ./share/mysql/english/errmsg.sys \
   -a -x ./bin/mysqld
--------------------------------------------------------------------------
@xref{safe_mysqld, @code{safe_mysqld}}.
@xref{safe_mysqld, , @code{safe_mysqld}}.
@end example
The above test should be successful, or you may encounter problems.
@item
......@@ -33965,7 +34080,7 @@ server). The dump will contain SQL statements to create the table
and/or populate the table.
If you are doing a backup on the server, you should consider using
the @code{mysqlhotcopy} instead. @xref{mysqlhotcopy, @code{mysqlhotcopy}}.
the @code{mysqlhotcopy} script instead. @xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
@example
shell> mysqldump [OPTIONS] database [tables]
......@@ -35084,7 +35199,8 @@ This can be used to get faster inserts! Deactivated indexes can be
reactivated by using @code{myisamchk -r}.
@item -l or --no-symlinks
Do not follow symbolic links. Normally @code{myisamchk} repairs the
table a symlink points at.
table a symlink points at. This option doesn't exist in MySQL 4.0,
as MySQL 4.0 will not remove symlinks during repair.
@item -r or --recover
Can fix almost anything except unique keys that aren't unique
(which is an extremely unlikely error with ISAM/MyISAM tables).
......@@ -38518,11 +38634,15 @@ user and use the @code{--user=user_name} option. @code{mysqld} will switch
to run as the Unix user @code{user_name} before accepting any connections.
@item
If you are using the @code{mysql.server} script to start @code{mysqld} when
the system is rebooted, you should edit @code{mysql.server} to use @code{su}
to run @code{mysqld} as user @code{user_name}, or to invoke @code{mysqld}
with the @code{--user} option. (No changes to @code{safe_mysqld} are
necessary.)
To start the server as the given user name automatically at system
startup time, add a @code{user} line that specifies the user name to
the @code{[mysqld]} group of the @file{/etc/my.cnf} option file or the
@file{my.cnf} option file in the server's data directory. For example:
@example
[mysqld]
user=user_name
@end example
@end enumerate
At this point, your @code{mysqld} process should be running fine and dandy as
......@@ -39170,8 +39290,8 @@ If you want to make a SQL level backup of a table, you can use
TABLE}. @xref{SELECT}. @xref{BACKUP TABLE}.
Another way to back up a database is to use the @code{mysqldump} program or
the @code{mysqlhotcopy} script. @xref{mysqldump, @code{mysqldump}}.
@xref{mysqlhotcopy, @code{mysqlhotcopy}}.
the @code{mysqlhotcopy} script. @xref{mysqldump, , @code{mysqldump}}.
@xref{mysqlhotcopy, , @code{mysqlhotcopy}}.
@enumerate
@item
......@@ -39263,7 +39383,8 @@ be an Internet service provider that wants to provide independent
If you want to run multiple servers, the easiest way is to compile the servers
with different TCP/IP ports and socket files so they are not
both listening to the same TCP/IP port or socket file. @xref{mysqld_multi}.
both listening to the same TCP/IP port or socket file. @xref{mysqld_multi, ,
@code{mysqld_multi}}.
Assume an existing server is configured for the default port number and
socket file. Then configure the new server with a @code{configure} command
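For example (a sketch; the port number, socket path, and installation
prefix are illustrative):

@example
shell> ./configure --with-tcp-port=3307 \
           --with-unix-socket-path=/tmp/mysql-new.sock \
           --prefix=/usr/local/mysql-new
@end example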
......@@ -39410,7 +39531,7 @@ switch to a new log) by executing @code{FLUSH LOGS}. @xref{FLUSH}.
@code{mysqld} writes all errors to stderr, which the
@code{safe_mysqld} script redirects to a file called
@code{'hostname'.err}. (On Windows, @code{mysqld} writes this directly
to @file{mysql.err}).
to @file{\mysql\data\mysql.err}).
This contains information indicating when @code{mysqld} was started and
stopped and also any critical errors found when running. If @code{mysqld}
......@@ -41633,7 +41754,7 @@ query string.)
If you want to know if the query should return a result set or not, you can
use @code{mysql_field_count()} to check for this.
@xref{mysql_field_count, @code{mysql_field_count}}.
@xref{mysql_field_count, , @code{mysql_field_count}}.
@subsubheading Return Values
......@@ -42500,6 +42621,9 @@ For more information on Object Oriented Programming
@uref{http://language.perl.com/info/documentation.html}
@end example
Note that if you want to use transactions with Perl, you need to have
@code{Msql-Mysql-modules} version 1.2216 or newer.
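As a minimal sketch of how transactions look from Perl DBI (the
connection parameters and table are illustrative, and the table must use
a transaction-safe handler such as @code{BDB} or @code{InnoDB}):

@example
use DBI;

# Connect with AutoCommit disabled so the statements below
# form a single transaction
my $dbh = DBI->connect("DBI:mysql:database=test;host=localhost",
                       "user", "password",
                       @{RaiseError => 1, AutoCommit => 0@});
eval @{
    $dbh->do("INSERT INTO t (a) VALUES (1)");
    $dbh->do("INSERT INTO t (a) VALUES (2)");
    $dbh->commit;          # make both inserts permanent
@};
$dbh->rollback if $@@;     # undo both inserts on any error
$dbh->disconnect;
@end example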
Installation instructions for @strong{MySQL} Perl support are given in
@ref{Perl support}.
......@@ -43565,53 +43689,518 @@ users.
@item
@end table
@cindex PostgreSQL, comparison
@cindex PostgreSQL/MySQL, overview
@node Compare PostgreSQL, , Compare mSQL, Comparisons
@section How MySQL Compares to PostgreSQL
When reading the following, please note that both products are
continually evolving. We at @strong{MySQL AB} and the PostgreSQL
developers are both working on making our respective databases as good as
possible, so both products are a serious alternative to any commercial
database.

The following comparison is made by us at MySQL AB. We have tried to be
as accurate and fair as possible, but because we don't have full
knowledge of all PostgreSQL features while we know @strong{MySQL}
thoroughly, we may have got some things wrong. We will, however, correct
these when they come to our attention.
We would first like to note that @code{PostgreSQL} and @strong{MySQL}
are both widely used products, but their design goals are completely
different. This means that for some applications @strong{MySQL} is more
suitable and for others @code{PostgreSQL} is more suitable. When
choosing which database to use, you should first check if the database's
feature set is good enough to satisfy your application. If you need
speed, @strong{MySQL} is probably your best choice. If you need some
of the extra features that @code{PostgreSQL} can offer, you should use
are both widely used products, but with different design goals, even though
we are both striving to be ANSI SQL compatible. This means that for
some applications @strong{MySQL} is more suitable and for others
@code{PostgreSQL} is more suitable. When choosing which database to
use, you should first check if the database's feature set satisfies your
application. If you need speed, @strong{MySQL} is probably your best
choice. If you need some of the extra features that only @code{PostgreSQL}
can offer, you should use @code{PostgreSQL}.
@cindex PostgreSQL/MySQL, strategies
@menu
* MySQL-PostgreSQL goals:: MySQL and PostgreSQL development strategies
* MySQL-PostgreSQL features:: Featurevise Comparison of MySQL and PostgreSQL
* MySQL-PostgreSQL benchmarks:: Benchmarking MySQL and PostgreSQL
@end menu
@node MySQL-PostgreSQL goals, MySQL-PostgreSQL features, Compare PostgreSQL, Compare PostgreSQL
@subsection MySQL and PostgreSQL development strategies
When adding features to MySQL, we take pride in doing an optimal,
definitive solution. The code should be so good that we shouldn't have
any need to change it in the foreseeable future. We also do not like to
sacrifice speed for features, but instead do our utmost to find a solution
that will give maximal throughput. This means that development takes
a little longer, but the end result is well worth it. This kind
of development is only possible because all server code is checked by
one of a few (currently two) people before it's included in the
@strong{MySQL} server.
We at MySQL AB believe in frequent releases to be able to push out new
features quickly to our users. Because of this we do a new small release
about every 3 weeks, with a major branch every year. All releases are
thoroughly tested with our testing tools on a lot of different platforms.
PostgreSQL is based on a kernel with lots of contributors. In this setup
it makes sense to prioritize adding a lot of new features, instead of
implementing them optimally, because one can always optimize things
later if the need arises.
Another big difference between @strong{MySQL} and PostgreSQL is that
nearly all of the code in the MySQL server is written by developers who
are employed by MySQL AB and are still working on the server code. The
exceptions are the transaction engines and the regexp library.
This is in sharp contrast to the PostgreSQL code where the majority of
the code is written by a big group of people with different backgrounds.
It was only recently that the PostgreSQL developers announced that their
current developer group had finally had time to take a look at all
the code in the current PostgreSQL release.
Both of the above development methods have their own merits and drawbacks.
We here at @strong{MySQL AB} think, of course, that our model is better,
because it gives better code consistency, more optimal and
reusable code, and, in our opinion, fewer bugs. Because we are the
authors of the @strong{MySQL} server code, we are better able to
coordinate new features and releases.
@cindex PostgreSQL/MySQL, features
@node MySQL-PostgreSQL features, MySQL-PostgreSQL benchmarks, MySQL-PostgreSQL goals, Compare PostgreSQL
@subsection Featurevise Comparison of MySQL and PostgreSQL
On the @uref{http://www.mysql.com/information/crash-me.php, crash-me}
page you can find a list of those database constructs and limits that
one can detect automatically with a program. Note, however, that a lot of
the numerical limits may be changed with startup options for the respective
database. The above web page is nevertheless extremely useful when you want
to ensure that your applications work with many different databases or
when you want to convert your application from one database to another.
@strong{MySQL} offers the following advantages over PostgreSQL:
@itemize @bullet
@item
@code{MySQL} is generally much faster than PostgreSQL.
@xref{MySQL-PostgreSQL benchmarks}.
@item
Because @strong{MySQL} has a much larger user base than PostgreSQL, the
code is more tested and has historically been more stable than
PostgreSQL. @strong{MySQL} is used much more in production
environments than PostgreSQL, mostly thanks to the fact that
@strong{MySQL AB}, formerly TCX DataKonsult AB, has provided top-quality
commercial support for @strong{MySQL} from the day it was released,
whereas until recently PostgreSQL was unsupported.
@item
@strong{MySQL} works on more platforms than PostgreSQL. @xref{Which OS}.
@item
@strong{MySQL} works better on Windows; @strong{MySQL} runs as a
native Windows application (a service on NT/Win2000/WinXP), while
PostgreSQL runs under the Cygwin emulation layer. We have heard that
PostgreSQL is not yet that stable on Windows, but we haven't been able
to verify this ourselves.
@item
@strong{MySQL} has more APIs for other languages and is supported by more
programs than PostgreSQL. @xref{Contrib}.
@item
@strong{MySQL} works on 24/7 heavy-duty systems. In most circumstances
you never have to run any cleanups on @code{MySQL}. PostgreSQL doesn't
yet support 24/7 systems, because you have to run @code{vacuum()}
once in a while to reclaim space from @code{UPDATE} and @code{DELETE}
commands and to perform the statistics analysis that is critical to get
good performance with PostgreSQL. Vacuum is also needed after adding
a lot of new rows to a table. On a busy system with lots of changes,
vacuum must be run very frequently, in the worst cases even many times a
day. During the @code{vacuum()} run, which may take hours if the
database is big, the database is, from a production standpoint,
practically dead. The PostgreSQL team has fixing this on their TODO list,
but we assume that this is not an easy thing to fix permanently.
@item
A working, tested replication feature used by sites like
@uref{http://finance.yahoo.com, Yahoo finance},
@uref{http://www.mobile.de/,mobile.de} and
@uref{http://www.slashdot.org,Slashdot}.
@item
The @strong{MySQL} distribution includes two different
testing suites (@file{mysql-test-run} and
@uref{http://www.mysql.com/information/crash-me.php,crash-me}) and a
benchmark suite. The test system is actively updated with code to test
each new feature and almost all repeatable bugs that come to our
attention. We test @strong{MySQL} with these on a lot of platforms
before every release. These tests are more sophisticated than anything
we have seen from PostgreSQL and ensure that the @strong{MySQL} code is
kept to a high standard.
@item
There are far more books in print on @strong{MySQL} than on PostgreSQL.
O'Reilly, Sams, Que, and New Riders are all major publishers with books
about MySQL. All @strong{MySQL} features are also documented in the
@strong{MySQL} on-line manual, because when a feature is implemented, the
@strong{MySQL} developers are required to document it before it's
included in the source.
@item
@strong{MySQL} supports more of the standard ODBC functions than
@code{PostgreSQL}.
@item
@strong{MySQL} has a much more sophisticated @code{ALTER TABLE}.
@item
@strong{MySQL} has support for tables without transactions for
applications that need all the speed they can get. The tables may be
memory-based @code{HEAP} tables or disk-based @code{MyISAM} tables.
@xref{Table types}.
@item
@strong{MySQL} has support for 3 different table handlers that support
transactions (@code{BDB}, @code{InnoDB}, and @code{Gemini}). Because
every transaction engine performs differently under different
conditions, this gives the application writer more options to find an
optimal solution for his/her setup. @xref{Table types}.
@item
@code{MERGE} tables give you a unique way to instantly make a view
over a set of identical tables and use them as one. This is perfect
for systems where you have log files that you split, for example, by
month (see the @code{MERGE} sketch after this list). @xref{MERGE}.
@item
The option to compress read-only tables, while still having direct
access to the rows in the table, gives you better performance by
minimizing disk reads. This is very useful when you are archiving
things. @xref{myisampack}.
@item
@strong{MySQL} has internal support for full-text search (see the
sketch after this list). @xref{Fulltext Search}.
@item
You can access many databases from the same connection (depending of course
on your privileges).
@item
@strong{MySQL} is coded from the start to be multi-threaded, while
PostgreSQL uses processes. Because context switching and access to
common storage areas are much faster between threads than between
separate processes, this gives @strong{MySQL} a big speed advantage in
multi-user applications and also makes it easier for @strong{MySQL} to
take full advantage of symmetric multiprocessor (SMP) systems.
@item
@strong{MySQL} has a much more sophisticated privilege system than
PostgreSQL. While PostgreSQL only supports @code{INSERT},
@code{SELECT}, and @code{UPDATE}/@code{DELETE} grants per user on a
database or a table, @strong{MySQL} allows you to define a full set of
different privileges at the database, table, and column level.
@strong{MySQL} also allows you to specify privileges for host+user
combinations (see the @code{GRANT} sketch after this list).
@xref{GRANT}.
@item
@strong{MySQL} supports a compressed server/client protocol which
improves performance over slow links.
@item
@strong{MySQL} employs the table handler concept and is the only
relational database we know of built around this concept. This allows
different low-level table types to be swapped into the SQL engine,
with each table type optimized for different performance
characteristics.
@item
All @strong{MySQL} table types (except @code{InnoDB}) are implemented
as files (i.e., one table per file), which makes it really easy to
back up, move, delete, and even symlink databases and tables when the
server is down.
@item
@strong{MySQL} provides tools to repair and optimize @code{MyISAM}
tables (the most common @strong{MySQL} table type). A repair tool is
only needed when physical corruption of a data file happens, usually
from a hardware failure; it allows a majority of the data to be
recovered.
@item
Upgrading @strong{MySQL} is painless. When you are upgrading @strong{MySQL},
you don't need to dump/restore your data, as you have to do with most
PostgreSQL upgrades.
@end itemize
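As a minimal sketch of the cleanup mentioned in the 24/7 item above
(the table name @code{log_table} is only illustrative):

@example
-- PostgreSQL: reclaim space left by big UPDATE/DELETE batches and
-- refresh the optimizer statistics; the table is blocked meanwhile.
VACUUM ANALYZE log_table;
@end example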
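The @code{MERGE} sketch promised above; table and column names are
only illustrative:

@example
CREATE TABLE log_jan (msg CHAR(20) NOT NULL) TYPE=MyISAM;
CREATE TABLE log_feb (msg CHAR(20) NOT NULL) TYPE=MyISAM;
# Present the two identical monthly tables as one:
CREATE TABLE log_all (msg CHAR(20) NOT NULL)
       TYPE=MERGE UNION=(log_jan,log_feb);
SELECT COUNT(*) FROM log_all;
@end example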
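The full-text sketch promised above (illustrative names; full-text
indexes work on @code{MyISAM} tables):

@example
CREATE TABLE articles (body TEXT, FULLTEXT (body)) TYPE=MyISAM;
SELECT * FROM articles WHERE MATCH (body) AGAINST ('database');
@end example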
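The @code{GRANT} sketch promised above; user, host, and object names
are only illustrative:

@example
# Table-level privileges for a specific user+host combination:
GRANT SELECT,INSERT ON mydb.mytable TO joe@@some.host.com;
# Column-level privilege:
GRANT UPDATE (name) ON mydb.mytable TO joe@@some.host.com;
@end example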
Drawbacks with @strong{MySQL} compared to PostgreSQL:
@itemize @bullet
@item
The transaction support in @strong{MySQL} is not yet as well tested as
PostgreSQL's system.
@item
Because @strong{MySQL} uses threads, which are still a moving target
on many operating systems, one must either use binaries from
@uref{http://www.mysql.com/downloads} or carefully follow our
instructions at
@uref{http://www.mysql.com/doc/I/n/Installing_source.html} to get an
optimal binary that works in all cases.
@item
Table locking, as used by the non-transactional @code{MyISAM} tables,
is in many cases faster than page locks, row locks, or versioning. The
drawback, however, is that if one doesn't take into account how table
locking works, a single long-running query can block a table for
updates for a long time. This can usually be avoided when designing
the application. If not, one can always switch the troublesome table
to one of the transactional table types (see the sketch after this
list). @xref{Table locking}.
@item
With UDFs (user-defined functions) one can extend @strong{MySQL} with
both normal SQL functions and aggregates, but this is not as easy or
as flexible as in PostgreSQL. @xref{Adding functions}.
@item
Updates and deletes that span multiple tables are harder to do in
@strong{MySQL}. (This will be fixed in @strong{MySQL} 4.0 with
multi-table @code{DELETE} and multi-table @code{UPDATE}, and in
@strong{MySQL} 4.1 with subselects.)
@end itemize
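As noted in the table-locking item above, the per-table choice of
handler means a troublesome table can simply be converted; a sketch
with illustrative names:

@example
# Speed-critical table that doesn't need transactions:
CREATE TABLE hits (id INT NOT NULL) TYPE=MyISAM;
# Table suffering from long table-lock waits, switched to a handler
# with row-level locking:
ALTER TABLE busy_table TYPE=InnoDB;
@end example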
PostgreSQL currently offers the following advantages over
@strong{MySQL}. Note that because we know the @strong{MySQL} roadmap,
we have included in the following table the version in which
@strong{MySQL} should support each feature. Unfortunately, we couldn't
do the same for the previous comparison, because we don't know the
PostgreSQL roadmap.
@multitable @columnfractions .70 .30
@item @strong{Feature} @tab @strong{MySQL version}
@item Subselects @tab 4.1
@item Foreign keys @tab 4.0 and 4.1
@item Views @tab 4.2
@item Stored procedures in multiple languages @tab 4.1
@item Extensible type system @tab Not planned
@item Unions @tab 4.0
@item Full join @tab 4.0 or 4.1
@item Triggers @tab 4.1
@item Constraints @tab 4.1
@item Cursors @tab 4.1 or 4.2
@item Extensible index types like R-trees @tab R-trees are planned for 4.2
@item Inherited tables @tab Not planned
@end multitable
@code{PostgreSQL} has some more advanced features like user-defined
types, triggers, rules, and some transaction support (currently it
has about the same semantics as @strong{MySQL}'s transactions in that the
transaction is not 100% atomic). However, PostgreSQL lacks many of the
standard types and functions from ANSI SQL and ODBC. See the @code{crash-me}
Web page (@uref{http://www.mysql.com/information/crash-me.php}) for a complete
list of limits and which types and functions are supported or unsupported.
Normally, @code{PostgreSQL} is a magnitude slower than @strong{MySQL}.
@xref{Benchmarks}. This is largely due to the fact that PostgreSQL has
only transaction-safe tables and that its transaction system is not as
sophisticated as Berkeley DB's. In @strong{MySQL} you can decide per
table whether you want the table to be fast or whether to take the
speed penalty of making it transaction-safe.

Other reasons to use PostgreSQL:
@itemize @bullet
@item
Standard usage in PostgreSQL is closer to ANSI SQL in some cases.
@item
One can speed up PostgreSQL by coding things as stored procedures.
@item
A bigger team of developers contributes to the server.
@end itemize
The most important things that @code{PostgreSQL} supports that
@strong{MySQL} doesn't yet support:

@table @code
@item Subselects
@item Foreign keys
@item Stored procedures
@item An extensible type system
@item A way to extend SQL to handle new key types (like R-trees)
@end table

Drawbacks with PostgreSQL compared to @strong{MySQL}:
@itemize @bullet
@item
@code{VACUUM} makes PostgreSQL hard to use in a 24/7 environment.
@item
Only transactional tables.
@item
Much slower insert/delete/update.
@end itemize
@strong{MySQL}, on the other hand, supports many ANSI SQL constructs
that @code{PostgreSQL} doesn't support. Most of these can be found at the
@uref{http://www.mysql.com/information/crash-me.php, @code{crash-me} Web page}.
For a complete list of drawbacks, you should also examine the first table
in this section.
If you really need the rich type system @code{PostgreSQL} offers, and
you can afford the speed penalty of having to make everything
transaction-safe, you should take a look at @code{PostgreSQL}.
@cindex PostgreSQL/MySQL, benchmarks
@node MySQL-PostgreSQL benchmarks, , MySQL-PostgreSQL features, Compare PostgreSQL
@subsection Benchmarking MySQL and PostgreSQL
The only open source benchmark that we know of that can be used to
benchmark @strong{MySQL} and PostgreSQL (and other databases) is our
own. It can be found at:
@uref{http://www.mysql.com/information/benchmarks.html}.
We have many times asked the PostgreSQL developers and some PostgreSQL
users to help us extend this benchmark to make it the definitive
benchmark for databases, but unfortunately we haven't received any
feedback on this. We, the @strong{MySQL} developers, have therefore
spent many hours getting maximum performance from PostgreSQL for the
benchmarks, but because we don't know PostgreSQL intimately, we are
sure that there are things that we have missed. On the benchmark page
we have documented exactly how we ran the benchmark, so that it should
be easy for anyone to repeat and verify our results.
The benchmarks are usually run with and without the @code{--fast}
option. When run with @code{--fast} we try to use every trick the
server supports to get the code to execute as fast as possible. The
idea is that the normal run shows how the server would work in a
default setup, while the @code{--fast} run shows how the server would
do if the application developer used server-specific extensions to
make his application run faster.
When running with PostgreSQL and @code{--fast}, we do a @code{VACUUM}
after every major table update and @code{DROP TABLE} to put the
database in perfect shape for the following selects. The time for
@code{VACUUM} is measured separately.
When running with PostgreSQL 7.1.1, however, we could not run with
@code{--fast}, because during the insert test the postmaster (the
PostgreSQL daemon) died and the database was so corrupted that it was
impossible to restart the postmaster. (The details about the machine
we ran the benchmark on can be found on the benchmark page.) After
this happened twice, we decided to postpone the @code{--fast} test
until the next PostgreSQL release.
Before going on to the other benchmarks we know of, we would like to
give some background on benchmarks:

It's very easy to write a test that shows ANY database to be the best
database in the world, by just restricting the test to something the
database is very good at and not testing anything that the database is
not good at. If one then publishes the result as a single figure,
things are even easier.
This would be like measuring the speed of @strong{MySQL} compared to
PostgreSQL by looking only at the summary time of the @strong{MySQL}
benchmarks on our Web page. Based on this, @strong{MySQL} would be
more than 40 times faster than PostgreSQL, something that is of course
not true. We could make things even worse by taking just the test
where PostgreSQL performs worst and claiming that @strong{MySQL} is
more than 2000 times faster than PostgreSQL.
The fact is that @strong{MySQL} does a lot of optimizations that
PostgreSQL doesn't do, and vice versa. An SQL optimizer is a very
complex thing, and a company could spend years just making the
optimizer faster and faster.
When looking at the benchmark results, you should look for the things
you do in your application and use just those results to decide which
database would be best suited for your application. The benchmark
results also show the things a particular database is not good at, and
should give you a notion of things to avoid and what you may have to
do in other ways.
We know of two benchmark tests that claim that PostgreSQL performs
better than @strong{MySQL}. Both of these were multi-user tests, a
kind of test that we here at @strong{MySQL AB} haven't had time to
write and include in the benchmark suite, mainly because it's a big
task to do this in a manner that is fair to all databases.
One is the benchmark paid for by
@uref{http://www.greatbridge.com/about/press.php?content_id=4,Great
Bridge}.
This is the worst benchmark we have ever seen anyone conduct. It was
not only tuned to test only what PostgreSQL is absolutely best at, it
was also totally unfair to every other database involved in the test.
@strong{NOTE}: We know that even some of the main PostgreSQL
developers did not like the way Great Bridge conducted the benchmark,
so we don't blame them for the way the benchmark was made.
This benchmark has been condemned in a lot of postings and newsgroups,
so here we will just briefly repeat some things that were wrong with
it.
@itemize @bullet
@item
The tests were run with an expensive commercial tool that makes it
impossible for an open source company like us to verify the
benchmarks, or even to check how the benchmarks were really done. The
tool is not even a true benchmark tool, but an application/setup
testing tool. To refer to this as a STANDARD benchmark tool is to
stretch the truth a long way.
@item
Great Bridge admitted that they had optimized the PostgreSQL database
(with a @code{VACUUM} before the test) and tuned the startup for the
tests, something they hadn't done for any of the other databases
involved. They say: "This process optimizes indexes and frees up disk
space a bit. The optimized indexes boost performance by some margin."
Our benchmarks clearly indicate that the speed of running a lot of
selects on a database with and without @code{VACUUM} can easily differ
by a factor of 10.
@item
The test results were also strange. The AS3AP test documentation
mentions that the test does "selections, simple joins, projections,
aggregates, one-tuple updates, and bulk updates."
PostgreSQL is good at doing selects and joins (especially after a
@code{VACUUM}), but doesn't perform as well on inserts/updates. The
benchmarks seem to indicate that only @code{SELECT}s were done (or
very few updates), which could easily explain the good results for
PostgreSQL in this test. The bad results for @strong{MySQL} will be
obvious a bit further down in this document.
@item
They ran the so-called benchmark from a Windows machine against a
Linux machine over ODBC, a setup that no normal database user would
ever use when running a heavy multi-user application. This tested the
ODBC driver and the Windows protocol used between the clients more
than the database itself.
@item
When running against Oracle and MS-SQL (Great Bridge has indirectly
indicated that these were the databases used in the test), they didn't
use the native protocol but ODBC instead. Anyone who has ever used
Oracle knows that all real applications use the native interface
instead of ODBC. Doing a test through ODBC and claiming that the
results had anything to do with using the database for real can't be
regarded as fair play. They should have done two tests, with and
without ODBC, to provide the right facts (after having gotten experts
to tune all the involved databases, of course).
@item
They refer to the TPC-C tests, but don't mention anywhere that the
test they did was not a true TPC-C test, and that they were not even
allowed to call it a TPC-C test. A TPC-C test can only be conducted
under the rules approved by the @uref{http://www.tpc.org,TPC-council}.
Great Bridge didn't do that. By doing this, they have both violated
the TPC trademark and discredited their own benchmarks. The rules set
by the TPC-council are very strict, to ensure that no one can produce
false results or make unprovable statements. Apparently Great Bridge
wasn't interested in doing this.
@item
After the first test, we contacted Great Bridge and pointed out some
of the obvious mistakes they had made with @strong{MySQL}: running
with a debug version of our ODBC driver, running on a Linux system
that wasn't optimized for threads, using an old @strong{MySQL} version
when there was a recommended newer one available, and not starting
@strong{MySQL} with the right options for heavy multi-user use (the
default installation of @strong{MySQL} is tuned for minimal resource
use). Great Bridge did run a new test, with our optimized ODBC driver
and with better startup options for @strong{MySQL}, but refused either
to use our updated glibc library or our standard binary (used by 80%
of our users), which was statically linked with a fixed glibc library.
As far as we know, Great Bridge did nothing to ensure that the other
databases were set up correctly to run well in their test environment.
We are sure, however, that they didn't contact Oracle or Microsoft to
ask for advice in this matter ;)
@item
The benchmark was paid for by Great Bridge, and they decided to
publish only partial, chosen results (instead of publishing all of
them).
@end itemize
Tim Perdue, a long-time PostgreSQL fan and a reluctant @strong{MySQL}
user, published a comparison on
@uref{http://www.phpbuilder.com/columns/tim20001112.php3,phpbuilder}.
When we became aware of the comparison, we phoned Tim Perdue about it,
because there were a lot of strange things in his results. For
example, he claimed that @strong{MySQL} had a problem with five users
in his tests, when we know that there are users with machines similar
to his that are using @strong{MySQL} with 2000 simultaneous
connections doing 400 queries per second. (In this case the limit was
the Web bandwidth, not the database.)
It sounded like he was using a Linux kernel that had some problems
with many threads. (Linux kernels before 2.4 had problems with this,
but we have documented how to fix it, and Tim should have been aware
of this problem.) The other possible problem could have been an old
glibc library: Tim may not have used a @strong{MySQL} binary from our
site, which is linked with a corrected glibc library, but instead
compiled a version of his own. In any of the above cases, the symptom
would have been exactly what Tim had measured.
We asked Tim if we could get access to his data so that we could
repeat the benchmark, and if he could check the @strong{MySQL} version
on the machine to find out what was wrong, and he promised to get back
to us about this. He has not done so yet.

Because of this, we can't put any trust in this benchmark either :(
Conclusion:

The only benchmarks that exist today that anyone can download and run
against @strong{MySQL} and PostgreSQL are the @strong{MySQL}
benchmarks. We here at @strong{MySQL AB} believe that open source
databases should be tested with open source tools! This is the only
way to ensure that no one runs tests that nobody can reproduce and
then uses them to claim that one database is better than another.
Without knowing all the facts, it's impossible to answer the claims of
the tester.
What we find strange is that every test we have seen about PostgreSQL
that is impossible to reproduce claims that PostgreSQL is better in
most cases, while our tests, which anyone can reproduce, clearly show
otherwise. With this we don't mean to say that PostgreSQL isn't good
at many things (it is!); we would just like to see a fair test where
PostgreSQL is very good, so that we could get some friendly
competition going!
@xref{MySQL Benchmarks}, for more information about our benchmark
suite.
We are working on an even better benchmark suite, including much better
documentation of what the individual tests really do and how to add more
tests to the suite.
@cindex internals
@cindex threads
......@@ -44677,8 +45266,8 @@ of several databases simultaneously. By Innovative-IT Development AB.
The @strong{MySQL} GUI client homepage. By Sinisa at @strong{MySQL AB}.
@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_navigator_0.9.0.tar.gz, MySQL navigator 0.9}
MySQL Navigator is MySQL database server GUI client program. The purpose
of MySQL Navigator is to provide a useful client interface to MySQL
MySQL Navigator is a @strong{MySQL} database server GUI client program. The purpose
of MySQL Navigator is to provide a useful client interface to @strong{MySQL}
database servers, whilst supporting multiple operating systems and
languages. You can currently import/export database, enter queries, get
result sets, edit scripts, run scripts, add, alter, and delete users,
......@@ -44701,7 +45290,7 @@ You can always find the latest version
@uref{http://www.trash.net/~ffischer/admin/index.html, here}.
@item @uref{http://www.mysql.com/Downloads/Win32/MySQL-Maker-1.0.zip,MySQL-Maker 1.0}.
Shareware @strong{MySQL} client for Windows. It's WYSIWYG tool which allows
Shareware @strong{MySQL} client for Windows. It's a WYSIWYG tool which allows
you to create, change and delete databases and tables.
You can change the field structure and add, change, and delete data
in these tables directly without an ODBC driver.
......@@ -44711,9 +45300,14 @@ these tables directly without ODBC-driver.
Windows GUI (binary only) to administer a database, by David B. Mansel,
@email{david@@zhadum.org}.
@item @uref{http://home.online.no/~runeberg/myqa, MyQA}
is a Linux-based query client for the @strong{MySQL} database server. MyQA
lets you enter SQL queries, execute them, and view the results, all in a
graphical user interface. The GUI is roughly similar to that of the
'Query Analyzer' client that comes with MS SQL Server.
@item @uref{http://members.xoom.com/_opex_/mysqlmanager/index.html, MySQL Manager}
a graphical MySQL server manager for MySQL server written in Java, for Windows
a graphical @strong{MySQL} server manager, written in Java, for Windows
@item @uref{http://www.mysql.com/Downloads/Win32/netadmin.zip, netadmin.zip}
......@@ -44794,6 +45388,24 @@ data either by clicking on the table folder or by composing their own SQL
statements with our built-in SQL editor. The tool has been tested with
Oracle 8 and @strong{MySQL} as the back-end databases. It requires JDK 1.3 from
JavaSoft.
@item @uref{http://www.jetools.com/products/databrowser/, DataBrowser}
The DataBrowser is a cross-database, cross-platform data access tool.
It is more user-friendly than command-line tools like SQL Plus and
psql. It is more flexible than TOAD, ISQL, and PGAccess, which are
GUIs that are limited to a single platform or database.
@item @uref{http://www.intrex.net/amit/software/, SQLC}
The SQL Console is a standalone Java application that allows you to
connect to an SQL database system and issue SQL queries and updates.
It has an easy-to-use graphical user interface. The SQL Console uses
JDBC to connect to database systems and, therefore, with the proper
JDBC drivers, you can use this utility to connect to some of the most
popular database systems.
@item @uref{http://www.mysql.com/Downloads/Contrib/mysql_mmc.zip, MySQL MMC}
MySQL MMC is a GUI management tool developed using KDevelop, with a
very good interface similar to the Microsoft Enterprise Manager (for
SQL Server) or Sybase Central. You can use it to manage servers,
databases, tables, indexes, and users, to edit table data in a grid,
and to execute SQL through its query analysis facility.
@end itemize
@cindex Web clients
......@@ -44843,7 +45455,7 @@ html templates. By Alex Krohn.
These CGI scripts in Perl enable you to edit the content of a
@strong{MySQL} database. By Tomas Zeman.
@item
@uref{http://futurerealm.com/opensource/futuresql.htm, FutureSQL Web Database Administration Tool}.
@uref{http://worldcommunity.com/opensource/futuresql, FutureSQL Web Database Administration Tool}.
FutureSQL by Peter F. Brown, is a free, open source rapid application
development Web database administration tool, written in Perl,
using @strong{MySQL}. It uses @code{DBI:DBD} and @code{CGI.pm}.
......@@ -44866,7 +45478,7 @@ and run update queries. Originally written to implement a simple fast
low-overhead banner-rotation system. By Sasha Pachev.
@item @uref{http://htcheck.sourceforge.net, htCheck} - URL checker with
MySQL backend. Spidered URLs can later be queried using SQL to retrieve
@strong{MySQL} backend. Spidered URLs can later be queried using SQL to retrieve
various kinds of information, eg. broken links. Written by Gabriele Bartolini.
@item @uref{http://www.odbsoft.com/cook/sources.htm}
......@@ -45102,6 +45714,10 @@ Patches for @code{radiusd} to make it support @strong{MySQL}. By Wim Bonis,
@appendixsec Useful Tools
@itemize @bullet
@item @uref{http://worldcommunity.com/opensource/utilities/mysql_backup.html, MySQL Backup}.
A backup script for MySQL. By Peter F. Brown.
@item @uref{http://www.mysql.com/Downloads/Contrib/mytop, mytop}
@item @uref{http://public.yahoo.com/~jzawodn/mytop/, mytop home page}
mytop is a Perl program that allows you to monitor @strong{MySQL} servers by
......@@ -45716,8 +46332,8 @@ Added @code{ALTER TABLE table_name DISABLE KEYS} and
@code{ALTER TABLE table_name ENABLE KEYS} commands.
@item
Added @code{HANDLER} command.
@item
Added support for symbolic links to @code{MyISAM} tables.
Added support for symbolic links to @code{MyISAM} tables. Symlink handling is
now enabled by default for Windows.
@item
Added @code{SQL_CALC_FOUND_ROWS} and @code{FOUND_ROWS()}. This makes it
possible to know how many rows a query would have returned
......@@ -45826,19 +46442,30 @@ not yet 100% confident in this code.
@appendixsubsec Changes in release 3.23.39
@itemize @bullet
@item
Fixed problem that client 'hang' when @code{LOAD TABLE FROM MASTER} failed.
If one dropped and added an @code{AUTO_INCREMENT} column, the
@code{AUTO_INCREMENT} sequence wasn't reset.
@item
@code{CREATE ... SELECT} now creates non-unique indexes delayed.
@item
Fixed problem where @code{LOCK TABLES table_name READ} followed by
@code{FLUSH TABLES} put an exclusive lock on the table.
@item
Running @code{myisamchk --fast --force} will not anymore repair tables
Fixed that @code{REAL} @@variables were represented with only 2 digits
when converted to strings.
@item
Fixed problem that client 'hung' when @code{LOAD TABLE FROM MASTER} failed.
@item
Running @code{myisamchk --fast --force} will no longer repair tables
that only had the open count wrong.
@item
Added functions to handle symbolic links to make life easier in 4.0.
@item
We are now using the @code{-lcma} thread library on HPUX 10.20 to
get @strong{MySQL} more stabile on HPUX.
We are now using the @code{-lcma} thread library on HP-UX 10.20 to
get @strong{MySQL} more stable on HP-UX.
@item
Fixed problem with @code{IF()} and number of decimals in the result.
@item
Fixed that date-part extract functions works with dates where day
Fixed date-part extraction functions to work with dates where day
and/or month is 0.
@item
Extended argument length in option files from 256 to 512 chars.
......@@ -45846,7 +46473,7 @@ Extended argument length in option files from 256 to 512 chars.
Fixed problem with shutdown when @code{INSERT DELAYED} was waiting for
a @code{LOCK TABLE}.
@item
Fixed coredump bug buged in InnoDB when tablespace was full.
Fixed coredump bug in InnoDB when tablespace was full.
@item
Fixed problem with @code{MERGE} tables and big tables (> 4G) when using
@code{ORDER BY}.
......@@ -46259,7 +46886,7 @@ Fixed problem when using @code{DECIMAL()} keys on negative numbers.
always returned @code{NULL}.
@item
Fixed security bug in something (please upgrade if you are using an earlier
MySQL 3.23 version).
@strong{MySQL} 3.23 version).
@item
Fixed buffer overflow bug when writing a certain error message.
@item
......@@ -46428,7 +47055,7 @@ slave server restart.
@item
@code{SHOW KEYS} now shows whether or not key is @code{FULLTEXT}.
@item
New script @file{mysqld_multi}. @xref{mysqld_multi}.
New script @file{mysqld_multi}. @xref{mysqld_multi, , @code{mysqld_multi}}.
@item
Added new script, @file{mysql-multi.server.sh}. Thanks to
Tim Bunce @email{Tim.Bunce@@ig.co.uk} for modifying @file{mysql.server} to
......@@ -46481,7 +47108,7 @@ read by @code{mysql_options()}.
Added new options @code{--pager[=...]}, @code{--no-pager},
@code{--tee=...} and @code{--no-tee} to the @code{mysql} client. The
new corresponding interactive commands are @code{pager}, @code{nopager},
@code{tee} and @code{notee}. @xref{mysql, @code{mysql}}, @code{mysql --help}
@code{tee} and @code{notee}. @xref{mysql, , @code{mysql}}, @code{mysql --help}
and the interactive help for more information.
@item
Fixed crash when automatic repair of @code{MyISAM} table failed.
......@@ -51082,6 +51709,10 @@ Everything in this list is approximately in the order it will be done. If you
want to affect the priority order, please register a license or support us and
tell us what you want to have done more quickly. @xref{Licensing and Support}.
The plan is that in the future we will support the full ANSI SQL99
standard, but with a lot of useful extensions. The challenge is to do
this without sacrificing speed or compromising the code.
@node TODO MySQL 4.0, TODO future, TODO, TODO
@appendixsec Things that should be in 4.0
......@@ -16,7 +16,7 @@
/* Show databases, tables or columns */
#define SHOW_VERSION "8.2"
#define SHOW_VERSION "8.3"
#include <global.h>
#include <my_sys.h>
......@@ -30,6 +30,7 @@
static my_string host=0,opt_password=0,user=0;
static my_bool opt_show_keys=0,opt_compress=0,opt_status=0;
static uint opt_verbose=0;
static void get_options(int *argc,char ***argv);
static uint opt_mysql_port=0;
......@@ -140,6 +141,7 @@ static struct option long_options[] =
#ifndef DONT_ALLOW_USER_CHANGE
{"user", required_argument, 0, 'u'},
#endif
{"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
{0, 0, 0, 0}
};
......@@ -181,6 +183,8 @@ static void usage(void)
-u, --user=# user for login if not current user\n");
#endif
printf("\
-v, --verbose more verbose output; You can use this multiple times\n\
to get even more verbose output.\n\
-V, --version output version information and exit\n");
puts("\n\
......@@ -200,7 +204,7 @@ get_options(int *argc,char ***argv)
int c,option_index;
my_bool tty_password=0;
while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?VWi",long_options,
while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?vVWi",long_options,
&option_index)) != EOF)
{
switch(c) {
......@@ -210,6 +214,9 @@ get_options(int *argc,char ***argv)
case 'c':
charsets_dir= optarg;
break;
case 'v':
opt_verbose++;
break;
case 'h':
host = optarg;
break;
......@@ -277,10 +284,13 @@ static int
list_dbs(MYSQL *mysql,const char *wild)
{
const char *header;
uint length;
uint length, counter = 0;
ulong rowcount = 0L;
char tables[NAME_LEN+1], rows[NAME_LEN+1];
char query[255];
MYSQL_FIELD *field;
MYSQL_RES *result;
MYSQL_ROW row;
MYSQL_ROW row, trow, rrow;
if (!(result=mysql_list_dbs(mysql,wild)))
{
......@@ -297,10 +307,79 @@ list_dbs(MYSQL *mysql,const char *wild)
if (length < field->max_length)
length=field->max_length;
if (!opt_verbose)
print_header(header,length,NullS);
else if (opt_verbose == 1)
print_header(header,length,"Tables",6,NullS);
else
print_header(header,length,"Tables",6,"Total Rows",12,NullS);
while ((row = mysql_fetch_row(result)))
{
counter++;
if (opt_verbose)
{
/*
* Original code by MG16373; slightly modified by Monty.
* Print the count of tables and rows for each database.
*/
if (!(mysql_select_db(mysql,row[0])))
{
MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL);
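/* mysql_list_tables() stores a result set of table names, so
   mysql_affected_rows() here gives the number of tables found */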
if (mysql_affected_rows(mysql) > 0)
{
sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql));
rowcount = 0;
if (opt_verbose > 1)
{
while ((trow = mysql_fetch_row(tresult)))
{
sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]);
if (!(mysql_query(mysql,query)))
{
MYSQL_RES *rresult;
if ((rresult = mysql_store_result(mysql)))
{
rrow = mysql_fetch_row(rresult);
rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10);
mysql_free_result(rresult);
}
}
}
sprintf(rows,"%12lu",rowcount);
}
}
else
{
sprintf(tables,"%6d",0);
sprintf(rows,"%12d",0);
}
mysql_free_result(tresult);
}
else
{
strmov(tables,"N/A");
strmov(rows,"N/A");
}
}
if (!opt_verbose)
print_row(row[0],length,0);
print_trailer(length,0);
else if (opt_verbose == 1)
print_row(row[0],length,tables,6,NullS);
else
print_row(row[0],length,tables,6,rows,12,NullS);
}
print_trailer(length,
(opt_verbose > 0 ? 6 : 0),
(opt_verbose > 1 ? 12 :0),
0);
if (counter && opt_verbose)
printf("%u row%s in set.\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
......@@ -310,10 +389,11 @@ static int
list_tables(MYSQL *mysql,const char *db,const char *table)
{
const char *header;
uint head_length;
uint head_length, counter = 0;
char query[255], rows[64], fields[16];
MYSQL_FIELD *field;
MYSQL_RES *result;
MYSQL_ROW row;
MYSQL_ROW row, rrow;
if (mysql_select_db(mysql,db))
{
......@@ -338,14 +418,81 @@ list_tables(MYSQL *mysql,const char *db,const char *table)
if (head_length < field->max_length)
head_length=field->max_length;
if (!opt_verbose)
print_header(header,head_length,NullS);
else if (opt_verbose == 1)
print_header(header,head_length,"Columns",8,NullS);
else
print_header(header,head_length,"Columns",8, "Total Rows",10,NullS);
while ((row = mysql_fetch_row(result)))
print_row(row[0],head_length,0);
print_trailer(head_length,0);
{
/*
* Modified by MG16373
* Print the count of rows for each table.
*/
counter++;
if (opt_verbose > 0)
{
if (!(mysql_select_db(mysql,db)))
{
MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL);
ulong rowcount=0L;
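/* mysql_list_fields() returns metadata only, so mysql_num_fields()
   gives the column count without fetching any rows */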
if (!rresult)
{
strmov(fields,"N/A");
strmov(rows,"N/A");
}
else
{
sprintf(fields,"%8u",(uint) mysql_num_fields(rresult));
mysql_free_result(rresult);
if (opt_verbose > 1)
{
sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]);
if (!(mysql_query(mysql,query)))
{
if ((rresult = mysql_store_result(mysql)))
{
rrow = mysql_fetch_row(rresult);
rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10);
mysql_free_result(rresult);
}
sprintf(rows,"%10lu",rowcount);
}
else
sprintf(rows,"%10d",0);
}
}
}
else
{
strmov(fields,"N/A");
strmov(rows,"N/A");
}
}
if (!opt_verbose)
print_row(row[0],head_length,NullS);
else if (opt_verbose == 1)
print_row(row[0],head_length, fields,8, NullS);
else
print_row(row[0],head_length, fields,8, rows,10, NullS);
}
print_trailer(head_length,
(opt_verbose > 0 ? 8 : 0),
(opt_verbose > 1 ? 10 :0),
0);
if (counter && opt_verbose)
printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
static int
list_table_status(MYSQL *mysql,const char *db,const char *wild)
{
......
......@@ -31,7 +31,8 @@ struct mem_area_struct{
};
/* Each memory area takes this many extra bytes for control information */
#define MEM_AREA_EXTRA_SIZE (sizeof(struct mem_area_struct))
#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_struct),\
UNIV_MEM_ALIGNMENT))
/************************************************************************
Creates a memory pool. */
......
......@@ -171,10 +171,10 @@ page_cur_search(
ut_ad(dtuple_check_typed(tuple));
page_cur_search_with_match(page, tuple, mode,
&low_matched_fields,
&low_matched_bytes,
&up_matched_fields,
&up_matched_bytes,
&low_matched_fields,
&low_matched_bytes,
cursor);
return(low_matched_fields);
}
......
......@@ -2207,7 +2207,7 @@ row_sel_get_clust_rec_for_mysql(
visit through secondary index records that would not really
exist in our snapshot. */
if ((old_vers || rec_get_deleted_flag(rec))
if (clust_rec && (old_vers || rec_get_deleted_flag(rec))
&& !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
clust_rec, clust_index)) {
clust_rec = NULL;
......
......@@ -176,7 +176,7 @@ trx_rollback_all_without_sess(void)
if (UT_LIST_GET_FIRST(trx_sys->trx_list)) {
fprintf(stderr,
"Innobase: Starting rollback of uncommitted transactions\n");
"InnoDB: Starting rollback of uncommitted transactions\n");
} else {
return;
}
......@@ -196,7 +196,7 @@ loop:
if (trx == NULL) {
fprintf(stderr,
"Innobase: Rollback of uncommitted transactions completed\n");
"InnoDB: Rollback of uncommitted transactions completed\n");
mem_heap_free(heap);
......@@ -221,7 +221,7 @@ loop:
ut_a(thr == que_fork_start_command(fork, SESS_COMM_EXECUTE, 0));
fprintf(stderr, "Innobase: Rolling back trx no %lu\n",
fprintf(stderr, "InnoDB: Rolling back trx no %lu\n",
ut_dulint_get_low(trx->id));
mutex_exit(&kernel_mutex);
......@@ -238,7 +238,7 @@ loop:
mutex_exit(&kernel_mutex);
fprintf(stderr,
"Innobase: Waiting rollback of trx no %lu to end\n",
"InnoDB: Waiting rollback of trx no %lu to end\n",
ut_dulint_get_low(trx->id));
os_thread_sleep(100000);
......@@ -264,7 +264,7 @@ loop:
mutex_exit(&(dict_sys->mutex));
}
fprintf(stderr, "Innobase: Rolling back of trx no %lu completed\n",
fprintf(stderr, "InnoDB: Rolling back of trx no %lu completed\n",
ut_dulint_get_low(trx->id));
mem_heap_free(heap);
......
......@@ -198,7 +198,7 @@ trx_sys_init_at_db_start(void)
if (UT_LIST_GET_LEN(trx_sys->trx_list) > 0) {
fprintf(stderr,
"Innobase: %lu uncommitted transaction(s) which must be rolled back\n",
"InnoDB: %lu uncommitted transaction(s) which must be rolled back\n",
UT_LIST_GET_LEN(trx_sys->trx_list));
}
......
......@@ -954,7 +954,7 @@ static int _nisam_cmp_buffer(File file, const byte *buff, ulong filepos, uint le
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
goto err;
if (memcmp((byte*) buff,temp_buff,IO_SIZE))
if (memcmp((byte*) buff,temp_buff,next_length))
DBUG_RETURN(1);
buff+=next_length;
length-= next_length;
......
......@@ -1221,20 +1221,19 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
char temp_buff[IO_SIZE*2];
DBUG_ENTER("_mi_cmp_buffer");
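/* Compare 'buff' against the file content at 'filepos' in IO_SIZE*2
   chunks; my_pread() is used so the file's current position stays
   unchanged for the caller */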
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1));
while (length > IO_SIZE*2)
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) ||
memcmp((byte*) buff,temp_buff,next_length))
goto err;
if (memcmp((byte*) buff,temp_buff,IO_SIZE))
DBUG_RETURN(1);
filepos+=next_length;
buff+=next_length;
length-= next_length;
next_length=IO_SIZE*2;
}
if (my_read(file,temp_buff,length,MYF(MY_NABP)))
if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP)))
goto err;
DBUG_RETURN(memcmp((byte*) buff,temp_buff,length));
err:
......
......@@ -1010,7 +1010,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file,
{
ref_length=myisam->s->pack.ref_length;
/*
We can't use my_pread() here because mi_rad_pack_record assumes
We can't use my_pread() here because mi_read_rnd_pack_record assumes
position is ok
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
......
......@@ -155,6 +155,10 @@ while test $# -gt 0; do
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1"
SLEEP_TIME=`$ECHO "$1" | $SED -e "s;--sleep=;;"`
;;
--mysqld=*)
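# Strip the --mysqld= prefix and pass the value on as an extra
# option through EXTRA_MYSQL_TEST_OPT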
TMP=`$ECHO "$1" | $SED -e "s;--mysqld=;;"`
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $TMP"
;;
--gcov )
if [ x$BINARY_DIST = x1 ] ; then
$ECHO "Cannot do coverage test without the source - please use source dist"
......@@ -170,6 +174,7 @@ while test $# -gt 0; do
$ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --gdb option"
fi
DO_GDB=1
USE_RUNNING_SERVER=""
;;
--client-gdb )
if [ x$BINARY_DIST = x1 ] ; then
......@@ -182,6 +187,7 @@ while test $# -gt 0; do
$ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --ddd option"
fi
DO_DDD=1
USE_RUNNING_SERVER=""
;;
--skip-*)
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1"
......
......@@ -27,3 +27,8 @@ n
12
Table Op Msg_type Msg_text
test.t1 optimize status OK
i
1
2
3
4
Table Op Msg_type Msg_text
test.t1 check status OK
......@@ -8,3 +8,7 @@ b
1 10000000001
a$1 $b c$
1 2 3
table type possible_keys key key_len ref rows Extra
t2 ref B B 21 const 1 where used
a B
3 world
@test @`select` @TEST @not_used
1 2 3 NULL
@test_int @test_double @test_string @test_string2 @select
10 0.00 abcdeghi abcdefghij NULL
10 1e-10 abcdeghi abcdefghij NULL
@test_int @test_double @test_string @test_string2
hello hello hello hello
@test_int @test_double @test_string @test_string2
......@@ -10,3 +10,5 @@ hellohello hellohello hellohello hellohello
NULL NULL NULL NULL
@t1:=(@t2:=1)+@t3:=4 @t1 @t2 @t3
5 5 1 4
@t5
1.23456
......@@ -71,7 +71,6 @@ ALTER TABLE t1 ADD Column new_col int not null;
UNLOCK TABLES;
OPTIMIZE TABLE t1;
DROP TABLE t1;
drop table if exists t1;
#
# ALTER TABLE ... ENABLE/DISABLE KEYS
......@@ -91,3 +90,13 @@ while ($1)
}
alter table t1 enable keys;
drop table t1;
#
# Drop and add an auto_increment column
#
create table t1 (i int unsigned not null auto_increment primary key);
insert into t1 values (null),(null),(null),(null);
alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i);
select * from t1;
drop table t1;
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;
drop table if exists t1;
#add a lot of keys to slow down check
create table t1(n int not null, key(n), key(n), key(n), key(n));
let $1=10000;
while ($1)
{
eval insert into t1 values ($1);
dec $1;
}
send check table t1 type=extended;
connection con2;
insert into t1 values (200000);
connection con1;
reap;
......@@ -2,6 +2,7 @@
# Check some special create statements.
#
drop table if exists t1,t2;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
......@@ -57,3 +58,14 @@ select a$1, $b, c$ from test_$1.$test1;
create table test_$1.test2$ (a int);
drop table test_$1.test2$;
drop database test_$1;
#
# Test of CREATE ... SELECT with indexes
#
create table t1 (a int auto_increment not null primary key, B CHAR(20));
insert into t1 (b) values ("hello"),("my"),("world");
create table t2 (key (b)) select * from t1;
explain select * from t2 where b="world";
select * from t2 where b="world";
drop table t1,t2;
#
# test variables
#
set @`test`=1,@TEST=3,@select=2;
set @`test`=1,@TEST=3,@select=2,@t5=1.23456;
select @test,@`select`,@TEST,@not_used;
set @test_int=10,@test_double=1e-10,@test_string="abcdeghi",@test_string2="abcdefghij",@select=NULL;
select @test_int,@test_double,@test_string,@test_string2,@select;
......@@ -12,3 +12,5 @@ select @test_int,@test_double,@test_string,@test_string2;
set @test_int=null,@test_double=null,@test_string=null,@test_string2=null;
select @test_int,@test_double,@test_string,@test_string2;
select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
select @t5;
......@@ -69,7 +69,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
DBUG_RETURN(0); /* Ok vid l{sning */
DBUG_RETURN(0); /* Read went ok; Return 0 */
DBUG_RETURN(readbytes); /* purecov: inspected */
}
} /* my_pread */
......
......@@ -11,30 +11,29 @@ export machine system version
SOURCE=`pwd`
CP="cp -p"
# Debug option must come first
STRIP=1
DEBUG=0
if test x$1 = x"--debug"
then
DEBUG=1
shift 1
fi
# Save temporary distribution here (must be full path)
SILENT=0
TMP=/tmp
if test $# -gt 0
then
TMP=$1
shift 1
fi
# Get optional suffix for distribution
SUFFIX=""
if test $# -gt 0
then
SUFFIX=$1
shift 1
fi
parse_arguments() {
for arg do
case "$arg" in
--debug) DEBUG=1;;
--tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
--suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
--no-strip) STRIP=0 ;;
--silent) SILENT=1 ;;
*)
echo "Unknown argument '$arg'"
exit 1
;;
esac
done
}
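# Example invocation (the option values are only illustrative):
#   ./make_binary_distribution --tmp=/var/tmp --suffix=-test --no-strip --silent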
parse_arguments "$@"
#make
......@@ -68,14 +67,18 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \
client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \
client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \
client/.libs/mysqlcheck
do
if [ -f $i ]
then
$CP $i $BASE/bin
fi
done
strip $BASE/bin/*
if [ x$STRIP = x1 ] ; then
strip $BASE/bin/*
fi
for i in sql/mysqld.sym.gz
do
......@@ -190,7 +193,13 @@ fi
echo "Using $tar to create archive"
cd $TMP
$tar cvf $SOURCE/$NEW_NAME.tar $NEW_NAME
OPT=cvf
if [ x$SILENT = x1 ] ; then
OPT=cf
fi
$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME
cd $SOURCE
echo "Compressing archive"
gzip -9 $NEW_NAME.tar
......
# This file describes how to run MySQL benchmarks with PostgreSQL
# This file describes how to run MySQL benchmark suite with PostgreSQL
#
# WARNING:
#
# Don't run the --fast test on a PostgreSQL 7.1.1 database on
# which you have any critical data; During one of our test runs
# PostgreSQL got a corrupted database and all data was destroyed!
# (When we tried to restart postmaster, It died with a
# When we tried to restart postmaster, it died with a
# 'no such file or directory' error and never recovered from that!
#
# Another time vacuum() filled our system disk, which had 6G free,
# while vacuuming a table of 60M.
#
# We have sent a mail about this to the PostgreSQL mailing list, so
# the PostgreSQL developers should be aware of these problems and should
# hopefully fix this soon.
#
# WARNING
# The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory,
# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.0 compiled with SMP
# support
# Both the Perl client and the database server are run
# on the same machine. No other CPU-intensive process was used during
......@@ -73,8 +80,15 @@ make install
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
# and a test where we do a vacuum() after each update.
# (The time for vacuum() is counted in the book-keeping() column)
# When running with --fast we run the following vacuum commands on
# the database between each major update of the tables:
# vacuum table
# or
# vacuum analyze
# vacuum
# The time for vacuum() is accounted for in the book-keeping() column, not
# in the test that updates the database.
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
......
......@@ -21,7 +21,7 @@ benchdir_root= $(prefix)
benchdir = $(benchdir_root)/sql-bench
bench_SCRIPTS = test-ATIS test-connect test-create test-insert \
test-big-tables test-select test-wisconsin \
test-alter-table \
test-alter-table graph-compare-results \
bench-init.pl compare-results run-all-tests \
server-cfg crash-me copy-db bench-count-distinct
CLEANFILES = $(bench_SCRIPTS)
......@@ -30,7 +30,7 @@ EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \
test-alter-table.sh test-wisconsin.sh \
bench-init.pl.sh compare-results.sh server-cfg.sh \
run-all-tests.sh crash-me.sh copy-db.sh \
bench-count-distinct.sh
bench-count-distinct.sh graph-compare-results.sh
EXTRA_DIST = $(EXTRA_SCRIPTS)
dist-hook:
......
......@@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate
the MySQL benchmarks. In order to run the benchmarks you should normally
execute a command like the following:
run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log
run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log
The above means that one wants to run the benchmark with MySQL. The limits
should be taken from all of mysql,PostgreSQL and Solid. Login name and
......
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:46:54
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 2 wallclock secs ( 0.49 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 2 wallclock secs ( 0.63 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (200): 15 wallclock secs ( 4.21 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 12 wallclock secs ( 1.70 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2600): 12 wallclock secs ( 1.43 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 19:26:17
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.45 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.89 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.87 CPU)
Time for select_join (100): 3 wallclock secs ( 0.51 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.71 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.08 usr 2.01 sys + 0.00 cusr 0.00 csys = 6.09 CPU)
Time for select_distinct (800): 15 wallclock secs ( 1.75 usr 0.69 sys + 0.00 cusr 0.00 csys = 2.44 CPU)
Time for select_group (2600): 20 wallclock secs ( 1.57 usr 0.41 sys + 0.00 cusr 0.00 csys = 1.98 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Benchmark DBD suite: 2.9
Date of test: 2000-08-17 19:09:48
Running tests on: Linux 2.2.14-my-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.22 beta
ATIS: Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
alter-table: Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 252.00 0.20 0.02 0.00 992
connect 10.00 6.60 1.51 0.00 10000
connect+select_1_row 13.00 7.08 2.47 0.00 10000
connect+select_simple 13.00 7.36 2.24 0.00 10000
count 46.00 0.07 0.00 0.00 100
count_distinct 124.00 0.65 0.16 0.00 1000
count_distinct_big 623.00 69.07 56.00 0.00 1020
count_distinct_group 77.00 0.94 0.33 0.00 1000
count_distinct_group_on_key 64.00 0.37 0.07 0.00 1000
count_distinct_group_on_key_parts 77.00 0.93 0.45 0.00 1000
count_group_on_key_parts 61.00 1.09 0.27 0.00 1000
count_on_key 574.00 16.11 3.17 0.00 50100
create+drop 26.00 2.10 0.81 0.00 10000
create_MANY_tables 32.00 1.97 0.49 0.00 10000
create_index 4.00 0.00 0.00 0.00 8
create_key+drop 40.00 3.64 0.72 0.00 10000
create_table 0.00 0.00 0.00 0.00 31
delete_big 21.00 0.00 0.00 0.00 13
delete_big_many_keys 120.00 0.00 0.00 0.00 2
delete_key 4.00 0.50 0.47 0.00 10000
drop_index 4.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 9.00 0.44 0.49 0.00 10000
insert 130.00 20.73 12.97 0.00 350768
insert_duplicates 113.00 18.31 11.27 0.00 300000
insert_key 159.00 8.91 4.08 0.00 100000
insert_many_fields 8.00 0.29 0.08 0.00 2000
min_max 31.00 0.03 0.00 0.00 60
min_max_on_key 213.00 25.00 4.86 0.00 85000
order_by 47.00 19.72 16.45 0.00 10
order_by_key 31.00 19.75 10.54 0.00 10
select_1_row 3.00 0.74 0.62 0.00 10000
select_2_rows 3.00 0.45 0.58 0.00 10000
select_big 37.00 23.09 11.64 0.00 10080
select_column+column 3.00 0.52 0.59 0.00 10000
select_diff_key 210.00 0.28 0.07 0.00 500
select_distinct 12.00 1.70 0.68 0.00 800
select_group 70.00 1.49 0.40 0.00 2711
select_group_when_MANY_tables 14.00 0.68 0.63 0.00 10000
select_join 15.00 4.21 2.20 0.00 200
select_key 129.00 66.05 14.03 0.00 200000
select_key_prefix 130.00 67.36 13.74 0.00 200000
select_many_fields 22.00 7.89 6.66 0.00 2000
select_range 21.00 7.00 1.72 0.00 25420
select_range_prefix 18.00 6.07 1.50 0.00 25010
select_simple 2.00 0.52 0.49 0.00 10000
select_simple_join 2.00 0.63 0.32 0.00 500
update_big 65.00 0.01 0.00 0.00 500
update_of_key 25.00 2.51 2.23 0.00 500
update_of_key_big 33.00 0.06 0.00 0.00 501
update_of_primary_key_many_keys 67.00 0.00 0.01 0.00 256
update_with_key 109.00 13.71 11.48 0.00 100000
wisc_benchmark 4.00 1.75 0.68 0.00 114
TOTALS 3920.00 438.58 200.19 0.00 1594242
Benchmark DBD suite: 2.12
Date of test: 2001-06-05 19:27:31
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.39
ATIS: Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
alter-table: Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
big-tables: Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
connect: Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
create: Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
insert: Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
select: Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
wisconsin: Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 261.00 0.13 0.02 0.15 992
connect 16.00 6.84 2.50 9.34 10000
connect+select_1_row 15.00 7.11 3.70 10.81 10000
connect+select_simple 13.00 6.70 3.21 9.91 10000
count 45.00 0.01 0.00 0.01 100
count_distinct 60.00 0.42 0.08 0.50 1000
count_distinct_2 63.00 0.18 0.03 0.21 1000
count_distinct_big 165.00 7.78 3.16 10.94 120
count_distinct_group 194.00 1.21 0.37 1.58 1000
count_distinct_group_on_key 59.00 0.51 0.07 0.58 1000
count_distinct_group_on_key_parts 194.00 1.12 0.46 1.58 1000
count_distinct_key_prefix 51.00 0.45 0.08 0.53 1000
count_group_on_key_parts 58.00 1.16 0.35 1.51 1000
count_on_key 586.00 16.61 2.71 19.32 50100
create+drop 33.00 2.94 0.82 3.76 10000
create_MANY_tables 18.00 1.02 0.62 1.64 5000
create_index 5.00 0.00 0.00 0.00 8
create_key+drop 41.00 3.05 0.66 3.71 10000
create_table 0.00 0.01 0.00 0.01 31
delete_all 17.00 0.00 0.00 0.00 12
delete_all_many_keys 75.00 0.03 0.00 0.03 1
delete_big 1.00 0.00 0.00 0.00 1
delete_big_many_keys 75.00 0.03 0.00 0.03 128
delete_key 4.00 0.76 0.29 1.05 10000
drop_index 5.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 6.00 0.37 0.63 1.00 5000
insert 144.00 24.06 14.28 38.34 350768
insert_duplicates 31.00 5.06 3.72 8.78 100000
insert_key 137.00 9.91 6.26 16.17 100000
insert_many_fields 10.00 0.54 0.08 0.62 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 9.00 0.00 0.00 0.00 1
min_max 30.00 0.04 0.01 0.05 60
min_max_on_key 230.00 28.28 4.43 32.71 85000
order_by_big 78.00 22.39 9.83 32.22 10
order_by_big_key 33.00 23.35 10.15 33.50 10
order_by_big_key2 32.00 22.53 9.81 32.34 10
order_by_big_key_desc 36.00 23.47 10.27 33.74 10
order_by_big_key_diff 74.00 22.66 9.76 32.42 10
order_by_big_key_prefix 33.00 22.18 9.81 31.99 10
order_by_key2_diff 9.00 1.30 0.85 2.15 500
order_by_key_prefix 4.00 0.97 0.57 1.54 500
order_by_range 8.00 1.26 0.49 1.75 500
outer_join 110.00 0.00 0.00 0.00 10
outer_join_found 107.00 0.00 0.00 0.00 10
outer_join_not_found 59.00 0.00 0.00 0.00 500
outer_join_on_key 60.00 0.00 0.00 0.00 10
select_1_row 3.00 0.81 0.69 1.50 10000
select_2_rows 3.00 0.67 0.63 1.30 10000
select_big 63.00 32.72 16.55 49.27 10080
select_column+column 4.00 0.52 0.46 0.98 10000
select_diff_key 193.00 0.32 0.04 0.36 500
select_distinct 15.00 1.75 0.69 2.44 800
select_group 75.00 1.59 0.45 2.04 2711
select_group_when_MANY_tables 5.00 0.43 0.87 1.30 5000
select_join 3.00 0.51 0.20 0.71 100
select_key 132.00 53.98 10.53 64.51 200000
select_key2 139.00 78.61 11.08 89.69 200000
select_key2_return_key 131.00 64.58 9.61 74.19 200000
select_key2_return_prim 134.00 72.33 11.34 83.67 200000
select_key_prefix 141.00 86.32 12.05 98.37 200000
select_key_prefix_join 13.00 4.08 2.01 6.09 100
select_key_return_key 125.00 59.92 12.00 71.92 200000
select_many_fields 23.00 8.85 7.55 16.40 2000
select_query_cache 120.00 3.67 0.53 4.20 10000
select_query_cache2 120.00 3.80 0.57 4.37 10000
select_range 201.00 9.05 3.95 13.00 410
select_range_key2 21.00 7.15 1.40 8.55 25010
select_range_prefix 22.00 6.55 1.40 7.95 25010
select_simple 2.00 0.54 0.49 1.03 10000
select_simple_join 3.00 0.68 0.19 0.87 500
update_big 64.00 0.00 0.00 0.00 10
update_of_key 25.00 2.62 1.44 4.06 50000
update_of_key_big 35.00 0.05 0.04 0.09 501
update_of_primary_key_many_keys 47.00 0.01 0.02 0.03 256
update_with_key 119.00 18.44 12.64 31.08 300000
update_with_key_prefix 36.00 6.23 3.85 10.08 100000
wisc_benchmark 5.00 2.33 0.52 2.85 114
TOTALS 5323.00 795.55 233.87 1029.42 2551551
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:47:38
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.06 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:47:22
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for alter_table_add (992): 261 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:51:59
Testing server 'MySQL 3.23.39' at 2001-06-05 13:51:53
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 9 wallclock secs ( 4.07 usr 3.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to select_many_fields(1000): 10 wallclock secs ( 4.43 usr 4.17 sys + 0.00 cusr 0.00 csys = 8.60 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 3.82 usr 3.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to select_many_fields(1000): 13 wallclock secs ( 4.42 usr 3.38 sys + 0.00 cusr 0.00 csys = 7.80 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.23 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to insert_many_fields(1000): 3 wallclock secs ( 0.46 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 5 wallclock secs ( 0.06 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to insert_many_fields(1000): 7 wallclock secs ( 0.08 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:52:30
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 10 wallclock secs ( 6.60 usr 1.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 7.36 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.52 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 13 wallclock secs ( 7.08 usr 2.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.74 usr 0.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.45 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test select with arithmetic (+)
Time for select_column+column (10000): 3 wallclock secs ( 0.52 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (7000 bytes)
Time to select_big (10000): 6 wallclock secs ( 2.98 usr 1.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:52:26
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 16 wallclock secs ( 6.84 usr 2.50 sys + 0.00 cusr 0.00 csys = 9.34 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 6.70 usr 3.21 sys + 0.00 cusr 0.00 csys = 9.91 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.54 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.03 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 7.11 usr 3.70 sys + 0.00 cusr 0.00 csys = 10.81 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.81 usr 0.69 sys + 0.00 cusr 0.00 csys = 1.50 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.67 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Test select with arithmetic (+)
Time for select_column+column (10000): 4 wallclock secs ( 0.52 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.98 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 30 wallclock secs (10.79 usr 6.41 sys + 0.00 cusr 0.00 csys = 17.20 CPU)
Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:53:24
Testing the speed of creating and dropping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (10000): 32 wallclock secs ( 1.97 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group_when_MANY_tables (10000): 14 wallclock secs ( 0.68 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table_when_MANY_tables (10000): 9 wallclock secs ( 0.44 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (10000): 26 wallclock secs ( 2.10 usr 0.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (10000): 40 wallclock secs ( 3.64 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:53:52
Testing the speed of creating and dropping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 18 wallclock secs ( 1.02 usr 0.62 sys + 0.00 cusr 0.00 csys = 1.64 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 5 wallclock secs ( 0.43 usr 0.87 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 6 wallclock secs ( 0.37 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.00 CPU)
Testing create+drop
Time for create+drop (10000): 33 wallclock secs ( 2.94 usr 0.82 sys + 0.00 cusr 0.00 csys = 3.76 CPU)
Time for create_key+drop (10000): 41 wallclock secs ( 3.05 usr 0.66 sys + 0.00 cusr 0.00 csys = 3.71 CPU)
Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:55:26
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_duplicates (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 30 wallclock secs (19.98 usr 10.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key (10:3000000): 31 wallclock secs (19.75 usr 10.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by (10:3000000): 47 wallclock secs (19.72 usr 16.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 210 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 10 wallclock secs ( 2.48 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (5010:42084): 11 wallclock secs ( 2.61 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 130 wallclock secs (67.36 usr 13.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 129 wallclock secs (66.05 usr 14.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.59 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (20000:43500): 8 wallclock secs ( 3.74 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 58 wallclock secs ( 0.06 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 4.40 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 31 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 46 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (20): 64 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (500): 25 wallclock secs ( 2.51 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (100000): 109 wallclock secs (13.71 usr 11.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (500): 65 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.50 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_big (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 159 wallclock secs ( 8.91 usr 4.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 67 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_big_many_keys (2): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 13:55:36
Testing the speed of inserting data into 1 table and doing some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 123 wallclock secs (21.22 usr 12.32 sys + 0.00 cusr 0.00 csys = 33.54 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 31 wallclock secs ( 5.06 usr 3.72 sys + 0.00 cusr 0.00 csys = 8.78 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 32 wallclock secs (21.78 usr 10.07 sys + 0.00 cusr 0.00 csys = 31.85 CPU)
Time for order_by_big_key (10:3000000): 33 wallclock secs (23.35 usr 10.15 sys + 0.00 cusr 0.00 csys = 33.50 CPU)
Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.47 usr 10.27 sys + 0.00 cusr 0.00 csys = 33.74 CPU)
Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (22.18 usr 9.81 sys + 0.00 cusr 0.00 csys = 31.99 CPU)
Time for order_by_big_key2 (10:3000000): 32 wallclock secs (22.53 usr 9.81 sys + 0.00 cusr 0.00 csys = 32.34 CPU)
Time for order_by_big_key_diff (10:3000000): 74 wallclock secs (22.66 usr 9.76 sys + 0.00 cusr 0.00 csys = 32.42 CPU)
Time for order_by_big (10:3000000): 78 wallclock secs (22.39 usr 9.83 sys + 0.00 cusr 0.00 csys = 32.22 CPU)
Time for order_by_range (500:125750): 8 wallclock secs ( 1.26 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.75 CPU)
Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 0.97 usr 0.57 sys + 0.00 cusr 0.00 csys = 1.54 CPU)
Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.30 usr 0.85 sys + 0.00 cusr 0.00 csys = 2.15 CPU)
Time for select_diff_key (500:1000): 193 wallclock secs ( 0.32 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.36 CPU)
Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.06 CPU)
Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.81 usr 0.68 sys + 0.00 cusr 0.00 csys = 3.49 CPU)
Time for select_key_prefix (200000): 141 wallclock secs (86.32 usr 12.05 sys + 0.00 cusr 0.00 csys = 98.37 CPU)
Time for select_key (200000): 132 wallclock secs (53.98 usr 10.53 sys + 0.00 cusr 0.00 csys = 64.51 CPU)
Time for select_key_return_key (200000): 125 wallclock secs (59.92 usr 12.00 sys + 0.00 cusr 0.00 csys = 71.92 CPU)
Time for select_key2 (200000): 139 wallclock secs (78.61 usr 11.08 sys + 0.00 cusr 0.00 csys = 89.69 CPU)
Time for select_key2_return_key (200000): 131 wallclock secs (64.58 usr 9.61 sys + 0.00 cusr 0.00 csys = 74.19 CPU)
Time for select_key2_return_prim (200000): 134 wallclock secs (72.33 usr 11.34 sys + 0.00 cusr 0.00 csys = 83.67 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 9 wallclock secs ( 4.00 usr 0.89 sys + 0.00 cusr 0.00 csys = 4.89 CPU)
Time for select_range_key2 (20000:43500): 9 wallclock secs ( 4.34 usr 0.72 sys + 0.00 cusr 0.00 csys = 5.06 CPU)
Time for select_group (111): 55 wallclock secs ( 0.02 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 5.12 usr 0.76 sys + 0.00 cusr 0.00 csys = 5.88 CPU)
Time for min_max (60): 30 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count_on_key (100): 52 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count (100): 45 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for count_distinct_big (20): 98 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 25 wallclock secs ( 2.62 usr 1.44 sys + 0.00 cusr 0.00 csys = 4.06 CPU)
Time for update_of_key_big (501): 35 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
Testing update with key
Time for update_with_key (300000): 119 wallclock secs (18.44 usr 12.64 sys + 0.00 cusr 0.00 csys = 31.08 CPU)
Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.23 usr 3.85 sys + 0.00 cusr 0.00 csys = 10.08 CPU)
Testing update of all rows
Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 107 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.76 usr 0.29 sys + 0.00 cusr 0.00 csys = 1.05 CPU)
Time for delete_all (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 137 wallclock secs ( 9.91 usr 6.26 sys + 0.00 cusr 0.00 csys = 16.17 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 47 wallclock secs ( 0.01 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:22:00
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 12 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 4 wallclock secs ( 0.81 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:75949): 2 wallclock secs ( 0.65 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 205 wallclock secs (20.60 usr 3.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 518 wallclock secs (16.08 usr 3.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.09 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing count(distinct) on the table
Time for count_distinct (1000:2000): 124 wallclock secs ( 0.65 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key (1000:6000): 64 wallclock secs ( 0.37 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 0.93 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group (1000:100000): 77 wallclock secs ( 0.94 usr 0.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (1000:10000000): 559 wallclock secs (69.04 usr 55.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing server 'MySQL 3.23.39' at 2001-06-05 14:41:13
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.14 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 120 wallclock secs ( 3.67 usr 0.53 sys + 0.00 cusr 0.00 csys = 4.20 CPU)
Time for select_query_cache2 (10000): 120 wallclock secs ( 3.80 usr 0.57 sys + 0.00 cusr 0.00 csys = 4.37 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.15 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.22 CPU)
Time for select_range (410:1057904): 201 wallclock secs ( 9.05 usr 3.95 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Time for min_max_on_key (70000): 222 wallclock secs (23.16 usr 3.67 sys + 0.00 cusr 0.00 csys = 26.83 CPU)
Time for count_on_key (50000): 534 wallclock secs (16.58 usr 2.69 sys + 0.00 cusr 0.00 csys = 19.27 CPU)
Time for count_group_on_key_parts (1000:100000): 58 wallclock secs ( 1.16 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.51 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 51 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.53 CPU)
Time for count_distinct (1000:1000): 60 wallclock secs ( 0.42 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.50 CPU)
Time for count_distinct_2 (1000:1000): 63 wallclock secs ( 0.18 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.21 CPU)
Time for count_distinct_group_on_key (1000:6000): 59 wallclock secs ( 0.51 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.58 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 194 wallclock secs ( 1.12 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_group (1000:100000): 194 wallclock secs ( 1.21 usr 0.37 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_big (100:1000000): 67 wallclock secs ( 7.77 usr 3.16 sys + 0.00 cusr 0.00 csys = 10.93 CPU)
Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:50:12
Testing server 'MySQL 3.23.39' at 2001-06-05 15:13:43
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 11 wallclock secs ( 1.12 usr 0.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to insert (31000): 13 wallclock secs ( 1.59 usr 1.18 sys + 0.00 cusr 0.00 csys = 2.77 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 4 wallclock secs ( 1.75 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for wisc_benchmark (114): 5 wallclock secs ( 2.33 usr 0.52 sys + 0.00 cusr 0.00 csys = 2.85 CPU)
Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
......@@ -25,7 +25,7 @@ use Getopt::Long;
$opt_server="mysql";
$opt_dir="output";
$opt_machine="";
$opt_machine=$opt_cmp="";
$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=$opt_no_bars=$opt_verbose=0;
GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count","no-bars","html","verbose") || usage();
......@@ -53,10 +53,6 @@ if ($#ARGV == -1)
@ARGV=glob($files);
$automatic_files=1;
}
else
{
$opt_cmp="";
}
foreach (@ARGV)
{
......
......@@ -38,7 +38,7 @@
# as such, and clarify ones such as "mediumint" with comments such as
# "3-byte int" or "same as xxx".
$version="1.56";
$version="1.57";
use DBI;
use Getopt::Long;
......@@ -1539,12 +1539,24 @@ report("insert INTO ... SELECT ...","insert_select",
"insert into crash_q (a) SELECT crash_me.a from crash_me",
"drop table crash_q $drop_attr");
report_trans("transactions","transactions",
[create_table("crash_q",["a integer not null"],[]),
if (!defined($limits{"transactions"}))
{
my ($limit,$type);
$limit="transactions";
print "$limit: ";
foreach $type (('', 'type=bdb', 'type=innodb', 'type=gemini'))
{
undef($limits{$limit});
last if (!report_trans($limit,
[create_table("crash_q",["a integer not null"],[],
$type),
"insert into crash_q values (1)"],
"select * from crash_q",
"drop table crash_q $drop_attr"
);
));
}
print "$limits{$limit}\n";
}
report("atomic updates","atomic_updates",
create_table("crash_q",["a integer not null"],["primary key (a)"]),
......@@ -2500,8 +2512,7 @@ sub report_result
sub report_trans
{
my ($prompt,$limit,$queries,$check,$clear)=@_;
print "$prompt: ";
my ($limit,$queries,$check,$clear)=@_;
if (!defined($limits{$limit}))
{
eval {undef($dbh->{AutoCommit})};
......@@ -2518,7 +2529,6 @@ sub report_trans
safe_query($clear);
} else {
$dbh->{AutoCommit} = 1;
safe_query($clear);
save_config_data($limit,"error",$prompt);
}
} else {
......@@ -2532,8 +2542,7 @@ sub report_trans
}
safe_query($clear);
}
print "$limits{$limit}\n";
return $limits{$limit} ne "no";
return $limits{$limit} ne "yes";
}
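The two hunks above work together: crash-me now retries the transaction probe once per table-type clause, and report_trans returns false as soon as the answer is "yes", which is what makes the "last if (!report_trans(...))" loop stop at the first engine that supports transactions. Below is a minimal standalone Perl sketch of that probe-until-success pattern; the %support hash is hypothetical stand-in data for the real create/insert/rollback check, not part of this commit.

#!/usr/bin/perl
use strict;
use warnings;

# Hypothetical probe results standing in for report_trans()'s real
# create/insert/rollback test against a live server.
my %support = ('' => 'no', 'type=bdb' => 'no',
               'type=innodb' => 'yes', 'type=gemini' => 'no');
my $result = 'no';
foreach my $type ('', 'type=bdb', 'type=innodb', 'type=gemini') {
    $result = $support{$type};   # the real loop calls report_trans() here
    last if $result eq 'yes';    # mirrors: last if (!report_trans(...))
}
print "transactions: $result\n";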
......@@ -2961,9 +2970,11 @@ sub sql_concat
sub create_table
{
my($table_name,$fields,$index) = @_;
my($table_name,$fields,$index,$extra) = @_;
my($query,$nr,$parts,@queries,@index);
$extra="" if (!defined($extra));
$query="create table $table_name (";
$nr=0;
foreach $field (@$fields)
......@@ -3015,7 +3026,7 @@ sub create_table
}
}
chop($query);
$query.= ')';
$query.= ") $extra";
unshift(@queries,$query);
return @queries;
}
......
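For reference, the create_table change above simply appends the new optional $extra argument after the closing parenthesis, so callers such as the transactions probe can pass a table-type clause. A simplified standalone sketch (the real sub also splits index definitions into separate queries):

#!/usr/bin/perl
use strict;
use warnings;

# Simplified sketch of the $extra handling; not the full create_table sub.
sub create_table_sketch {
    my ($table_name, $fields, $index, $extra) = @_;
    $extra = "" if (!defined($extra));
    return "create table $table_name (" .
           join(",", @$fields, @$index) . ") $extra";
}

print create_table_sketch("crash_q", ["a integer not null"], [], "type=innodb"), "\n";
# prints: create table crash_q (a integer not null) type=innodb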
####
#### Hello ... this is a heavily hacked script by Luuk
#### instead of printing the result it makes a nice gif
#### when you want to look at the code ... beware of the
#### ugliest code ever seen .... but it works ...
#### and that's sometimes the only thing you want ... isn't it ...
#### as the original script ... Hope you like it
####
#### Greetz..... Luuk de Boer 1997.
####
## if you want the seconds behind the bar printed or not ...
## or only the one where the bar is too big for the graph ...
## look at line 535 of this program and below ...
## look in sub calculate for almost all hard/soft settings :-)
# a little program to generate a table of results
# just read all the RUN-*.log files and format them nicely
# Made by Luuk de Boer
# Patched by Monty
use Getopt::Long;
use GD;
$opt_server="mysql";
$opt_cmp="mysql,pg,solid";
$opt_cmp="msql,mysql,pg,solid";
$opt_cmp="empress,mysql,pg,solid";
$opt_dir="output";
$opt_machine="";
$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=0;
GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count") || usage();
usage() if ($opt_help || $opt_Information);
if ($opt_same_server)
{
$files="$opt_dir/RUN-$opt_server-*$opt_machine";
}
else
{
$files="$opt_dir/RUN-*$opt_machine";
}
$files.= "-cmp-$opt_cmp" if (length($opt_cmp));
$automatic_files=0;
if ($#ARGV == -1)
{
@ARGV=glob($files);
$automatic_files=1;
}
#
# Go through all RUN files and gather statistics.
#
foreach (@ARGV)
{
$filename = $_;
next if (defined($found{$_})); # remove duplicates
$found{$_}=1;
/RUN-(.*)$/;
$prog = $1;
push(@key_order,$prog);
$next = 0;
open(TMP, "<$filename") || die "Can't open $filename: $!\n";
while (<TMP>)
{
chomp;
if ($next == 0) {
if (/Server version:\s+(\S+.*)/i)
{
$tot{$prog}{'server'} = $1;
}
elsif (/Arguments:\s+(.+)/i)
{
$tot{$prog}{'arguments'} = $1;
# Remove some standard, uninformative arguments
$tot{$prog}{'arguments'} =~ s/--log|--use-old-results|--server=\S+|--cmp=\S+|--user=\S+|--pass=\S+|--machine=\S+//g;
$tot{$prog}{'arguments'} =~ s/\s+/ /g;
}
elsif (/Comments:\s+(.+)/i) {
$tot{$prog}{'comments'} = $1;
} elsif (/^(\S+):\s*(estimated\s|)total\stime:\s+(\d+)\s+secs/i)
{
$tmp = $1; $tmp =~ s/://;
$tot{$prog}{$tmp} = [ $3, (length($2) ? "+" : "")];
$op1{$tmp} = $tmp;
} elsif (/Totals per operation:/i) {
$next = 1;
next;
}
}
elsif ($next == 1)
{
if (/^(\S+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*([+|?])*/)
{
$tot1{$prog}{$1} = [$2,$6,$7];
$op{$1} = $1;
#print "TEST - $_ \n * $prog - $1 - $2 - $6 - $7 ****\n";
# $prog - filename
# $1 - operation
# $2 - time in secs
# $6 - number of loops
# $7 - nothing / + / ? / * => estimated time ...
# get the highest value ....
$highest = ($2/$6) if (($highest < ($2/$6)) && ($1 !~/TOTALS/i));
$gifcount++;
$giftotal += ($2/$6);
}
}
}
}
if (!%op)
{
print "Didn't find any files matching: '$files'\n";
print "Use the --cmp=server,server option to compare benchmarks\n";
exit 1;
}
# everything is loaded ...
# now we have to create a fancy output :-)
# I prefer to redirect scripts instead of forcing output to a file ; Monty
#
# open(RES, ">$resultfile") || die "Can't write to $resultfile: $!\n";
# select(RES)
#
#print <<EOF;
#<cut for this moment>
#
#EOF
if ($opt_relative)
{
# print "Column 1 is in seconds. All other columns are presented relative\n";
# print "to this. 1.00 is the same, bigger numbers indicates slower\n\n";
}
#print "The result logs which where found and the options:\n";
if ($automatic_files)
{
for ($i = 0 ; $i <= $#key_order; $i++)
{
if ($key_order[$i] =~ /^$opt_server/)
{
unshift(@key_order,$key_order[$i]);
splice(@key_order,$i+1,1);
}
}
}
# extra for mysql and mysql_pgcc
#$number1 = shift(@key_order);
#$number2 = shift(@key_order);
#unshift(@key_order,$number1);
#unshift(@key_order,$number2);
# Print header
$column_count=0;
foreach $key (@key_order)
{
$column_count++;
# printf "%2d %-40.40s: %s %s\n", $column_count, $key,
# $tot{$key}{'server'}, $tot{$key}{'arguments'};
# print "Comments: $tot{$key}{'comments'}\n"
# if ($tot{$key}{'comments'} =~ /\w+/);
}
#print "\n";
$namewidth=$opt_skip_count ? 20 :25;
$colwidth= $opt_relative ? 9 : 6;
print_sep("=");
#printf "%-$namewidth.${namewidth}s|", "Operation";
$count = 1;
foreach $key (@key_order)
{
# printf "%${colwidth}d|", $count;
$count++;
}
#print "\n";
#print_sep("-");
#print_string("Results per test:");
#print_sep("-");
foreach $key (sort {$a cmp $b} keys %op1)
{
# printf "%-$namewidth.${namewidth}s|", $key;
$first=undef();
foreach $server (@key_order)
{
print_value($first,$tot{$server}{$key}->[0],$tot{$server}{$key}->[1]);
$first=$tot{$server}{$key}->[0] if (!defined($first));
}
# print "\n";
}
print_sep("-");
print_string("The results per operation:");
print_sep("-");
$luukcounter = 1;
foreach $key (sort {$a cmp $b} keys %op)
{
next if ($key =~ /TOTALS/i);
$tmp=$key;
$tmp.= " (" . $tot1{$key_order[0]}{$key}->[1] . ")" if (!$skip_count);
# printf "%-$namewidth.${namewidth}s|", $tmp;
$first=undef();
foreach $server (@key_order)
{
print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
$first=$tot1{$server}{$key}->[0] if (!defined($first));
}
# print "\n";
$luukcounter++;
}
#print_sep("-");
$key="TOTALS";
#printf "%-$namewidth.${namewidth}s|", $key;
$first=undef();
foreach $server (@key_order)
{
# print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
$first=$tot1{$server}{$key}->[0] if (!defined($first));
}
#print "\n";
#print_sep("=");
&make_gif;
exit 0;
#
# some format functions;
#
sub print_sep
{
my ($sep)=@_;
# print $sep x ($namewidth + (($colwidth+1) * $column_count)+1),"\n";
}
sub print_value
{
my ($first,$value,$flags)=@_;
my ($tmp);
if (defined($value))
{
if (!defined($first) || !$opt_relative)
{
$tmp=sprintf("%d",$value);
}
else
{
$first=1 if (!$first); # Assume that it took one second instead of 0
$tmp= sprintf("%.2f",$value/$first);
}
if (defined($flags))
{
$tmp="+".$tmp if ($flags =~ /\+/);
$tmp="?".$tmp if ($flags =~ /\?/);
}
}
else
{
$tmp="";
}
$tmp= " " x ($colwidth-length($tmp)) . $tmp if (length($tmp) < $colwidth);
# print $tmp . "|";
}
sub print_string
{
my ($str)=@_;
my ($width);
$width=$namewidth + ($colwidth+1)*$column_count;
$str=substr($str,1,$width) if (length($str) > $width);
# print($str," " x ($width - length($str)),"|\n");
}
sub usage
{
exit(0);
}
###########################################
###########################################
###########################################
# making a gif of the results here ... (let's try it :-))
# luuk .... 1997
###########################################
## take care that $highest / $giftotal / $gifcount / $luukcounter
## are getting their values above ... so don't forget them while
## copying the code to some other program ....
sub make_gif {
&gd; # some base things ....
&legend; # make the nice legend
&lines; # yep sometimes you have to print some lines
&gif("gif/benchmark2-".$opt_cmp); # and finally we can print all to a gif file ...
}
##### mmm we are finished now ...
# first we have to calculate some limits and some other stuff
sub calculate {
# here is the list which I have to know to make everything .....
# the small border width ... $sm_border =
# the border default $border =
# the step default ... if it must be calculated then no value $step =
# the highest number $highest =
# the max length of the text of the x borders $max_len_lb=
# the max length of a legend entry $max_len_le=
# number of entries in the legend $num_legen =
# the length of the color blocks for the legend $legend_block=
# the width of the gif ...if it must be calculated - no value $width =
# the height of the gif .. if it must be calculated - no value $height =
# the width of the grey field ' ' ' ' $width_grey=
# the height of the grey field ' ' ' ' $height_grey=
# number of dashed lines $lines=
# if bars must overlap how much they must be overlapped $overlap=
# titlebar title of graph in two colors big $titlebar=
# titlebar1 sub title of graph in small font in black $titlebar1=
# xlabel $xlabel=
# ylabel $ylabel=
# the name of the gif ... $name=
# then the following things must be known .....
# xlabel below or on the left side ?
# legend yes/no?
# where must the legend be placed?
# must the xlabel be printed horizontal or vertical?
# must the ylabel be printed horizontal or vertical?
# must the graph be a line or a bar graph?
# is a xlabel several different entries or some sub entries of one?
# so xlabel 1 => test1=10, test2=15, test3=7 etc
# or xlabel 1 => test1a=12, test1b=10, test1c=7 etc
# must the bars overlap (only with the second example I think)
# must the number be printed above or next to the bar?
# when must the number be printed .... only when it extends beyond the graph ...???
# the space between the bars .... are that the same width of the bars ...
# or is it a separate space ... defined ???
# must the date be printed below or somewhere else ....
#calculate all space for text and other things ....
$sm_border = 8; # the grey border around ...
$border = 40; #default ...
$left_border = 2.75 * $border; #default ...
$right_border = $border; #default ...
$up_border = $border; #default ...
$down_border = $border; # default ...
$step = ($height - $up_border - $down_border)/ ($luukcounter + (($#key_order + 1) * $luukcounter));
# can set $step to get nice graphs ... and change the format ...
$step = 8; # set hard the step value
$gifavg = ($giftotal/$gifcount);
$highest = 2 * $gifavg;
$highest = 1; # set hard the highest value ...
$xhigh = int($highest + .5 * $highest);
# here we get the max length of the test entries ...
# so we can calculate the width of the left border
foreach $oper (sort keys (%op)) {
$max_len_lb = length($oper) if (length($oper) > $max_len_lb);
# print "oper = $oper - $max_len_lb\n";
}
$max_len_lb = $max_len_lb * gdSmallFont->width;
$left_border = (3*$sm_border) + $max_len_lb;
$down_border = (4*$sm_border) + (gdSmallFont->width*(length($xhigh)+3)) + (gdSmallFont->height *2);
$right_border = (3*$sm_border) + 3 + (gdSmallFont->width*(length($highest)+5));
# calculate the space for the legend .....
foreach $key (@key_order) {
$tmp = $key;
$tmp =~ s/-cmp-$opt_cmp//i;
$giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
$max_len_le = length($giflegend) if (length($giflegend) > $max_len_le);
}
$max_len_le = $max_len_le * gdSmallFont->width;
$legend_block = 10; # the length of the block in the legend
$max_high_le = (($#key_order + 1)*(gdSmallFont->height+2)) + (2*$legend_block);
$down_border += $max_high_le;
$up_border = (5 * $sm_border) + gdSmallFont->height + gdLargeFont->height;
print "Here some things we already know ....\n";
# print "luukcounter = $luukcounter (number of tests)\n";
# print "gifcount = $gifcount (number of total entries)\n";
# print "giftotal = $giftotal (total secs)\n";
# print "gifavg = $gifavg\n";
# print "highest = $highest\n";
# print "xhigh = $xhigh\n";
# print "step = $step -- $#key_order\n";
# print "max_len_lb = $max_len_lb\n";
# printf "Small- width %d - height %s\n",gdSmallFont->width,gdSmallFont->height;
# printf "Tiny- width %d - height %s\n",gdTinyFont->width,gdTinyFont->height;
}
sub gd {
&calculate;
$width = 600; # the width ....
$height = 500; # the height ...
$width_greyfield = 430;
# when $step is set ... count the height ....????
$width = $width_greyfield + $left_border + $right_border;
$height = ($step * ($luukcounter + ($luukcounter * ($#key_order + 1)))) + $down_border + $up_border;
$b_width = $width - ($left_border + $right_border); # width within the grey field
$overlap = 0; # how far the columns may overlap each other ... nice :-)
# make the gif image ....
$im = new GD::Image($width,$height);
# allocate the colors to use ...
$white = $im->colorAllocate(255,255,255);
$black = $im->colorAllocate(0,0,0);
$paper_white = $im->colorAllocate(220, 220, 220);
$grey1 = $im->colorAllocate(240, 240, 240);
$grey4 = $im->colorAllocate(229, 229, 229);
$grey2 = $im->colorAllocate(102, 102, 102);
$grey3 = $im->colorAllocate(153, 153, 153);
$red = $im->colorAllocate(205,0,0); # msql
$lred = $im->colorAllocate(255,0,0);
$blue = $im->colorAllocate(0,0,205); # mysql
$lblue = $im->colorAllocate(0,0,255); # mysql_pgcc
$green = $im->colorAllocate(0, 205, 0); # postgres
$lgreen = $im->colorAllocate(0, 255, 0); # pg_fast
$orange = $im->colorAllocate(205,133, 0); # solid
$lorange = $im->colorAllocate(255, 165, 0); # Adabas
$yellow = $im->colorAllocate(205,205,0); # empress
$lyellow = $im->colorAllocate(255,255,0);
$magenta = $im->colorAllocate(255,0,255); # oracle
$lmagenta = $im->colorAllocate(255,200,255);
$cyan = $im->colorAllocate(0,205,205); # sybase
$lcyan = $im->colorAllocate(0,255,255);
$sienna = $im->colorAllocate(139,71,38); # db2
$lsienna = $im->colorAllocate(160,82,45);
$coral = $im->colorAllocate(205,91,69); # Informix
$lcoral = $im->colorAllocate(255,114,86);
$peach = $im->colorAllocate(205,175,149);
$lpeach = $im->colorAllocate(255,218,185);
@colors = ($red, $blue, $green, $orange, $yellow, $magenta, $cyan, $sienna, $coral, $peach);
@lcolors = ($lred, $lblue, $lgreen, $lorange, $lyellow, $lmagenta, $lcyan, $lsienna, $lcoral, $lpeach);
# set a color per server so in every result it has the same color ....
foreach $key (@key_order) {
if ($tot{$key}{'server'} =~ /mysql/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i) {
$tot{$key}{'color'} = $lblue;
} else {
$tot{$key}{'color'} = $blue;
}
} elsif ($tot{$key}{'server'} =~ /msql/i) {
$tot{$key}{'color'} = $lred;
} elsif ($tot{$key}{'server'} =~ /postgres/i) {
if ($key =~ /pg_fast/i) {
$tot{$key}{'color'} = $lgreen;
} else {
$tot{$key}{'color'} = $green;
}
} elsif ($tot{$key}{'server'} =~ /solid/i) {
$tot{$key}{'color'} = $lorange;
} elsif ($tot{$key}{'server'} =~ /empress/i) {
$tot{$key}{'color'} = $lyellow;
} elsif ($tot{$key}{'server'} =~ /oracle/i) {
$tot{$key}{'color'} = $magenta;
} elsif ($tot{$key}{'server'} =~ /sybase/i) {
$tot{$key}{'color'} = $cyan;
} elsif ($tot{$key}{'server'} =~ /db2/i) {
$tot{$key}{'color'} = $sienna;
} elsif ($tot{$key}{'server'} =~ /informix/i) {
$tot{$key}{'color'} = $coral;
} elsif ($tot{$key}{'server'} =~ /microsoft/i) {
$tot{$key}{'color'} = $peach;
} elsif ($tot{$key}{'server'} =~ /access/i) {
$tot{$key}{'color'} = $lpeach;
} elsif ($tot{$key}{'server'} =~ /adabas/i) {
$tot{$key}{'color'} = $lorange;
}
}
# make the nice little borders
# left bar
$poly0 = new GD::Polygon;
$poly0->addPt(0,0);
$poly0->addPt($sm_border,$sm_border);
$poly0->addPt($sm_border,($height - $sm_border));
$poly0->addPt(0,$height);
$im->filledPolygon($poly0,$grey1);
$im->polygon($poly0, $grey4);
# upper bar
$poly3 = new GD::Polygon;
$poly3->addPt(0,0);
$poly3->addPt($sm_border,$sm_border);
$poly3->addPt(($width - $sm_border),$sm_border);
$poly3->addPt($width,0);
$im->polygon($poly3, $grey4);
$tmptime = localtime(time);
$im->string(gdSmallFont,($width - $sm_border - (gdSmallFont->width * length($tmptime))),($height - ($sm_border) - gdSmallFont->height), $tmptime, $grey3);
# right bar
$poly1 = new GD::Polygon;
$poly1->addPt($width,0);
$poly1->addPt(($width - $sm_border),$sm_border);
$poly1->addPt(($width - $sm_border),($height - $sm_border));
$poly1->addPt($width,$height);
$im->filledPolygon($poly1, $grey3);
$im->stringUp(gdSmallFont,($width - 10),($height - (2 * $sm_border)), "Made by Luuk de Boer - 1997 (c)", $blue);
#below bar
$poly2 = new GD::Polygon;
$poly2->addPt(0,$height);
$poly2->addPt($sm_border,($height - $sm_border));
$poly2->addPt(($width - $sm_border),($height - $sm_border));
$poly2->addPt($width,$height);
$im->filledPolygon($poly2, $grey2);
# draw the black line around the area where you will print ... (must be done last
# but it is hard to develop with ... and the filled grey must be done first :-)
$im->filledRectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$grey4);
# print the nice title ...
$titlebar = "MySQL Benchmark results"; # head title ...
$titlebar1 = "Compare $opt_cmp "; # sub title
$header2 = "seconds/test"; # header value
$center = ($width / 2) - ((gdLargeFont->width * length($titlebar)) / 2);
$center1 = ($width / 2) - ((gdSmallFont->width * length($titlebar1)) / 2);
$center2 = ($width_greyfield/2) - ((gdSmallFont->width*length($header2))/2);
$bovenkant = $sm_border * 3;
$bovenkant1 = $bovenkant + gdLargeFont->height + (.5*$sm_border);
$bovenkant2 = $height - $down_border + (1*$sm_border) + (gdSmallFont->width*(length($xhigh)+3));
$im->string(gdLargeFont,($center),($bovenkant + 1), $titlebar, $grey3);
$im->string(gdLargeFont,($center),($bovenkant), $titlebar, $red);
$im->string(gdSmallFont,($center1),($bovenkant1), $titlebar1, $black);
$im->string(gdSmallFont,($left_border + $center2),($bovenkant2), $header2, $black);
$xlength = $width - $left_border - $right_border;
$lines = 10; # hard coded number of dashed lines
$xverh = $xlength / $xhigh;
# print " de verhouding ===> $xverh --- $xlength -- $xhigh \n";
$xstep = ($xhigh / $lines) * $xverh;
$teller = 0;
# make the nice dashed lines and print the values ...
for ($i = 0; $i <= $lines; $i++) {
$st2 = ($left_border) + ($i * $xstep);
$im->dashedLine($st2,($height-$down_border),$st2,($up_border), $grey3);
if (($i != 0) && ($teller == 2)) {
$st3 = sprintf("%.2f", $i*($xhigh/$lines));
$im->stringUp(gdTinyFont,($st2 - (gdSmallFont->height/2)),($height - $down_border +(.5*$sm_border) + (gdSmallFont->width*(length($xhigh)+3))), $st3, $black);
$teller = 0;
}
$teller++;
}
$im->rectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$black);
}
sub legend {
# make the legend ...
$legxbegin = $left_border;
$legybegin = $height - $down_border + (2*$sm_border) + (gdSmallFont->width * (length($xhigh) + 3)) + gdSmallFont->height;
$legxend = $legxbegin + $max_len_le + (4*$legend_block);
$legxend = $legxbegin + $width_greyfield;
$legyend = $legybegin + $max_high_le;
$im->filledRectangle($legxbegin,$legybegin,$legxend,$legyend,$grey4);
$im->rectangle($legxbegin,$legybegin,$legxend,$legyend,$black);
# calculate the space for the legend .....
$c = 0; $i = 1;
$legybegin += $legend_block;
foreach $key (@key_order) {
$xtmp = $legxbegin + $legend_block;
$ytmp = $legybegin + ($c * (gdSmallFont->height +2));
$xtmp1 = $xtmp + $legend_block;
$ytmp1 = $ytmp + gdSmallFont->height;
$im->filledRectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$tot{$key}{'color'});
$im->rectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$black);
$tmp = $key;
$tmp =~ s/-cmp-$opt_cmp//i;
$giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
$xtmp2 = $xtmp1 + $legend_block;
$im->string(gdSmallFont,$xtmp2,$ytmp,"$giflegend",$black);
$c++;
$i++;
# print "$c $i -> $giflegend\n";
}
}
sub lines {
$g = 0;
$i = 0;
$ybegin = $up_border + ((($#key_order + 2)/2)*$step);
$xbegin = $left_border;
foreach $key (sort {$a cmp $b} keys %op) {
next if ($key =~ /TOTALS/i);
$c = 0;
# print "key - $key\n";
foreach $server (@key_order) {
$tot1{$server}{$key}->[1] = 1 if ($tot1{$server}{$key}->[1] == 0);
$entry = $tot1{$server}{$key}->[0]/$tot1{$server}{$key}->[1];
$ytmp = $ybegin + ($i * $step) ;
$xtmp = $xbegin + ($entry * $xverh) ;
$ytmp1 = $ytmp + $step;
# print "$server -- $entry --x $xtmp -- y $ytmp - $c\n";
$entry1 = sprintf("%.2f", $entry);
if ($entry < $xhigh) {
$im->filledRectangle($xbegin, $ytmp, $xtmp, $ytmp1, $tot{$server}{'color'});
$im->rectangle($xbegin, $ytmp, $xtmp, $ytmp1, $black);
# print the seconds behind the bar (look below for another entry)
# this entry is for the bars that are not greater than the max width
# of the grey field ...
# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$black));
# if you want the seconds in the color of the bar just uncomment it (below)
# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$tot{$server}{'color'}));
} else {
$im->filledRectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $tot{$server}{'color'});
$im->rectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $black);
# print the seconds behind the bar (look below for another entry)
# here the seconds are printed behind the bar if the bar is too big for
# the graph ... (seconds greater than xhigh ...)
$im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$black);
# if you want the seconds in the color of the bar just uncomment it (below)
# $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$colors[$c]);
}
$c++;
$i++;
}
# see if we can center the text between the bars ...
$ytmp2 = $ytmp1 - (((($c)*$step) + gdSmallFont->height)/2);
$im->string(gdSmallFont,($sm_border*2),$ytmp2,$key, $black);
$i++;
}
}
sub gif {
my ($name) = @_;
$name_gif = $name . ".gif";
print "name --> $name_gif\n";
open (GIF, "> $name_gif") || die "Can't open $name_gif: $!\n";
print GIF $im->gif;
close (GIF);
}
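The whole script boils down to a handful of GD calls: allocate an image and colors, draw filled rectangles for the bars, outline and label them, then write the image out. A minimal standalone sketch of that pattern (assuming, as the script does, a GD version that still provides the gif method); the file name and label are hypothetical:

#!/usr/bin/perl
use GD;

my $im    = new GD::Image(200, 60);
my $white = $im->colorAllocate(255, 255, 255);  # first color becomes the background
my $black = $im->colorAllocate(0, 0, 0);
my $blue  = $im->colorAllocate(0, 0, 205);

$im->filledRectangle(10, 20, 150, 40, $blue);   # one bar
$im->rectangle(10, 20, 150, 40, $black);        # its outline
$im->string(gdSmallFont, 10, 5, "demo bar", $black);

open (GIF, "> demo.gif") || die "Can't open demo.gif: $!\n";
binmode(GIF);
print GIF $im->gif;
close (GIF);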
#This file is automatically generated by crash-me 1.54
#This file is automatically generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
......@@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.54 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
......@@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
server_version=MySQL 3.23.29 gamma # server version
server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
......@@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
transactions=no # transactions
transactions=yes # transactions
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile
......
#This file is automatically generated by crash-me 1.54
#This file is automatically generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
......@@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.54 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
......@@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
server_version=MySQL 3.23.29 gamma # server version
server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
......@@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
transactions=no # transactions
transactions=yes # transactions
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile
......
#This file is automatically generated by crash-me 1.56
#This file is automatically generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
......@@ -36,7 +36,7 @@ constraint_check=yes # Column constraints
constraint_check_table=yes # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.56 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=yes # default value function for column
create_if_not_exists=no # create table if not exists
......
......@@ -800,18 +800,29 @@ sub reconnect_on_errors
sub vacuum
{
my ($self,$full_vacuum,$dbh_ref)=@_;
my ($loop_time,$end_time,$dbh);
my ($self,$full_vacuum,$dbh_ref,@tables)=@_;
my ($loop_time,$end_time,$dbh,$table);
if (defined($full_vacuum))
{
$$dbh_ref->disconnect; $$dbh_ref= $self->connect();
}
$dbh=$$dbh_ref;
$loop_time=new Benchmark;
if ($#tables >= 0)
{
foreach $table (@tables)
{
$dbh->do("vacuum analyze $table") || die "Got error: $DBI::errstr when executing 'vacuum analyze $table'\n";
$dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
}
else
{
# $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
# $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum analyze'\n";
$dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
$end_time=new Benchmark;
print "Time for book-keeping (1): " .
Benchmark::timestr(Benchmark::timediff($end_time, $loop_time),"all") . "\n\n";
......
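The vacuum change above adds an optional list of tables: with no list the whole database is vacuumed as before, while a list restricts the "vacuum analyze"/"vacuum" statements to those tables. A standalone sketch of the two call shapes, using a hypothetical MockServer in place of the real server object and DBI handle:

#!/usr/bin/perl
use strict;
use warnings;

package MockServer;
# Hypothetical stand-in for the real server object; prints the statements the
# extended vacuum would issue instead of running them against PostgreSQL.
sub new { return bless {}, shift }
sub vacuum {
    my ($self, $full_vacuum, $dbh_ref, @tables) = @_;
    if (@tables) {
        print "vacuum analyze $_\nvacuum $_\n" foreach @tables;  # per-table path
    } else {
        print "vacuum analyze\nvacuum\n";                        # whole-database path
    }
}

package main;
my $server = MockServer->new;
my $dbh;                                   # placeholder for a real DBI handle
$server->vacuum(1, \$dbh);                 # database-wide, as before
$server->vacuum(0, \$dbh, "t1", "t2");     # new: vacuum only the listed tables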
......@@ -250,10 +250,6 @@ if ($limits->{'unique_index'})
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
}
#if ($opt_fast && defined($server->{vacuum}))
#{
# $server->vacuum(1,\$dbh);
#}
####
#### Do some selects on the table
......@@ -1410,10 +1406,6 @@ if ($limits->{'insert_multi_value'})
print "Time for multiple_value_insert (" . ($opt_loop_count) . "): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
if ($opt_fast && defined($server->{vacuum}))
{
$server->vacuum(1,\$dbh);
}
if ($opt_lock_tables)
{
$sth = $dbh->do("UNLOCK TABLES ") || die $DBI::errstr;
......
......@@ -679,7 +679,7 @@ ha_innobase::bas_ext() const
/* out: file extension strings, currently not
used */
{
static const char* ext[] = {".not_used", NullS};
static const char* ext[] = {".InnoDB", NullS};
return(ext);
}
......@@ -779,6 +779,13 @@ ha_innobase::open(
if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
fprintf(stderr, "\
Cannot find table %s from the internal data dictionary\n\
of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
and created again an InnoDB database but forgotten to delete the\n\
corresponding .frm files of old InnoDB tables?\n",
norm_name);
free_share(share);
my_free((char*) upd_buff, MYF(0));
my_errno = ENOENT;
......
......@@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str)
return NULL;
switch (entry->type) {
case REAL_RESULT:
str->set(*(double*) entry->value);
str->set(*(double*) entry->value,decimals);
break;
case INT_RESULT:
str->set(*(longlong*) entry->value);
......
......@@ -2839,7 +2839,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
{"innodb_unix_file_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
{"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
#endif
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
......@@ -3113,6 +3113,7 @@ static void usage(void)
puts("\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
--innodb_flush_method=# Which method to flush data\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
......
......@@ -384,6 +384,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
thd->in_lock_tables=1;
result=reopen_tables(thd,1,1);
thd->in_lock_tables=0;
/* Set version for table */
for (TABLE *table=thd->open_tables; table ; table=table->next)
table->version=refresh_version;
}
VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)
......
......@@ -1362,6 +1362,7 @@ select_create::prepare(List<Item> &values)
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->deactivate_non_unique_index((ha_rows) 0);
DBUG_RETURN(0);
}
......
......@@ -221,6 +221,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
db_options|=HA_OPTION_PACK_RECORD;
file=get_new_handler((TABLE*) 0, create_info->db_type);
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
(file->option_flag() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA,MYF(0),table_name);
DBUG_RETURN(-1);
}
/* Don't pack keys in old tables if the user has requested this */
while ((sql_field=it++))
......@@ -1240,8 +1247,17 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (drop->type == Alter_drop::COLUMN &&
!my_strcasecmp(field->field_name, drop->name))
{
/* Reset auto_increment value if it was dropped */
if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
create_info->auto_increment_value=0;
create_info->used_fields|=HA_CREATE_USED_AUTO;
}
break;
}
}
if (drop)
{
drop_it.remove();
......
......@@ -360,6 +360,7 @@ fi
%attr(755, root, root) /usr/bin/mysql
%attr(755, root, root) /usr/bin/mysqlaccess
%attr(755, root, root) /usr/bin/mysqladmin
%attr(755, root, root) /usr/bin/mysqlcheck
%attr(755, root, root) /usr/bin/mysql_find_rows
%attr(755, root, root) /usr/bin/mysqldump
%attr(755, root, root) /usr/bin/mysqlimport
......