Commit cf42a955 authored by unknown

Added ABS() to make tests more portable.

New PostgreSQL crash-me file.
Increased blob size in benchmarks from 65K to 1M.


mysql-test/t/select.test:
  Added ABS() to make tests more portable
mysys/tree.c:
  Added missing call to tree->free() (MySQL itself didn't use this code path)
sql-bench/Comments/postgres.benchmark:
  Updated documentation
sql-bench/bench-init.pl.sh:
  Updated version number (changed blob size)
sql-bench/limits/pg.cfg:
  New postgres results
sql-bench/server-cfg.sh:
  Updated to PostgreSQL 7.1.1
sql-bench/test-connect.sh:
  Changed select_big -> select_big_str
tests/fork_big.pl:
  Added count(distinct) test
parent 1b4d4338
mysql-test/t/select.test
@@ -1609,7 +1609,7 @@ select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 grou
  #
  select sum(Period)/count(*) from t1;
- select companynr,count(price) as "count",sum(price) as "sum" ,sum(price)/count(price)-avg(price) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
+ select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
  select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
  #
......
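Why abs() helps here: sum(price)/count(price) - avg(price) is mathematically zero, but floating-point rounding leaves a tiny residue whose sign can differ between platforms and builds, so the recorded result may print as -0.0000 on one machine and 0.0000 on another. A minimal Perl sketch of the formatting effect (the value of $diff is purely illustrative, not taken from the test):

#!/usr/bin/perl
use strict;
use warnings;

# Stand-in for the rounding residue; only its sign is platform dependent.
my $diff= -1.2e-9;

printf "without abs(): %.4f\n", $diff;        # may print -0.0000
printf "with abs():    %.4f\n", abs($diff);   # always prints 0.0000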
mysys/tree.c
@@ -251,6 +251,8 @@ int tree_delete(TREE *tree, void *key)
   }
   if (remove_colour == BLACK)
     rb_delete_fixup(tree,parent);
+  if (tree->free)
+    (*tree->free)(ELEMENT_KEY(tree,element));
   my_free((gptr) element,MYF(0));
   tree->elements_in_tree--;
   return 0;
......
sql-bench/Comments/postgres.benchmark
@@ -18,39 +18,43 @@
  # corresponding file. If you are using csh, use setenv.
  #
- export POSTGRES_INCLUDE=/usr/local/pgsql/include
- export POSTGRES_LIB=/usr/local/pgsql/lib
- PATH=$PATH:/usr/local/pgsql/bin
- MANPATH=$MANPATH:/usr/local/pgsql/man
+ export POSTGRES_INCLUDE=/usr/local/pg/include
+ export POSTGRES_LIB=/usr/local/pg/lib
+ PATH=$PATH:/usr/local/pg/bin
+ MANPATH=$MANPATH:/usr/local/pg/man
  #
  # Add the following line to /etc/ld.so.conf:
  #
- /usr/local/pgsql/lib
+ /usr/local/pg/lib
  and run ldconfig.
- #
- # untar the postgres source distribution and cd to src/
- # run the following commands:
- #
- ./configure CFLAGS=-O3
+ # untar the postgres source distribution, cd to postgresql-*
+ # and run the following commands:
+ ./configure
  gmake
  gmake install
- mkdir /usr/local/pgsql/data
- chown postgres /usr/local/pgsql/data
+ mkdir /usr/local/pg/data
+ chown postgres /usr/local/pg/data
  su - postgres
- /usr/local/pgsql/bin/initdb -D /usr/local/pgsql/data
- su postgres -c "/usr/local/pgsql/bin/postmaster -o -F -D /usr/local/pgsql/data" &
- su postgres -c "/usr/local/pgsql/bin/createdb test"
+ /usr/local/pg/bin/initdb -D /usr/local/pg/data
+ /usr/local/pg/bin/postmaster -o -F -D /usr/local/pg/data &
+ /usr/local/pg/bin/createdb test
+ exit
  #
- # Second, install packages DBD-Pg-0.95.tar.gz and DBI-1.14.tar.gz,
+ # Second, install packages DBD-Pg-1.00.tar.gz and DBI-1.14.tar.gz,
  # available from http://www.perl.com/CPAN/
+ #
+ export POSTGRES_LIB=/usr/local/pg/lib/
+ export POSTGRES_INCLUDE=/usr/local/pg/include/postgresql
+ perl Makefile.PL
+ make
+ make install
  #
  # Now we run the test that can be found in the sql-bench directory in the
@@ -59,17 +63,16 @@ su postgres -c "/usr/local/pgsql/bin/createdb test"
  # We did run two tests:
  # The standard test
- run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
+ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
  # and a test where we do a vacuum() after each update.
  # (The time for vacuum() is counted in the book-keeping() column)
- run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
+ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
  # If you want to store the results in a output/RUN-xxx file, you should
  # repeate the benchmark with the extra option --log --use-old-result
  # This will create a the RUN file based of the previous results
- #
- run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
- run-all-tests --comment="Intel Xeon, 2x550 Mhz, 1G, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
+ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --log --use-old-result
+ run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512MG, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast --log --use-old-result
sql-bench/bench-init.pl.sh
@@ -31,7 +31,7 @@
  # $server	Object for current server
  # $limits	Hash reference to limits for benchmark
- $benchmark_version="2.12";
+ $benchmark_version="2.13";
  use Getopt::Long;
  require "$pwd/server-cfg" || die "Can't read Configuration file: $!\n";
......
sql-bench/limits/pg.cfg: new PostgreSQL crash-me results (diff collapsed, not shown).
sql-bench/server-cfg.sh
@@ -121,53 +121,49 @@ sub new
  $self->{'vacuum'} = 1; # When using with --fast
  $self->{'drop_attr'} = "";
- $limits{'max_conditions'} = 9999; # (Actually not a limit)
- $limits{'max_columns'} = 2000; # Max number of columns in table
- # Windows can't handle that many files in one directory
- $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
- $limits{'max_text_size'} = 65000; # Max size with default buffers.
- $limits{'query_size'} = 1000000; # Max size with default buffers.
- $limits{'max_index'} = 16; # Max number of keys
- $limits{'max_index_parts'} = 16; # Max segments/key
- $limits{'max_column_name'} = 64; # max table and column name
- $limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 1; # Has load data infile
- $limits{'lock_tables'} = 1; # Has lock tables
- $limits{'functions'} = 1; # Has simple functions (+/-)
- $limits{'group_functions'} = 1; # Have group functions
- $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
- $limits{'select_without_from'}= 1; # Can do 'select 1';
- $limits{'multi_drop'} = 1; # Drop table can take many tables
- $limits{'subqueries'} = 0; # Doesn't support sub-queries.
- $limits{'left_outer_join'} = 1; # Supports left outer joins
- $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
- $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
- $limits{'having_with_group'} = 1; # Can use group functions in HAVING
- $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
- $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
- $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
- $limits{'alter_table'} = 1; # Have ALTER TABLE
+ $limits{'NEG'} = 1; # Supports -id
  $limits{'alter_add_multi_col'}= 1; #Have ALTER TABLE t add a int,add b int;
+ $limits{'alter_table'} = 1; # Have ALTER TABLE
  $limits{'alter_table_dropcol'}= 1; # Have ALTER TABLE DROP column
- $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
- $limits{'group_func_extra_std'} = 1; # Have group function std().
- $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'column_alias'} = 1; # Alias for fields in select statement.
  $limits{'func_extra_%'} = 1; # Has % as alias for mod()
- $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
  $limits{'func_extra_if'} = 1; # Have function if.
- $limits{'column_alias'} = 1; # Alias for fields in select statement.
- $limits{'NEG'} = 1; # Supports -id
  $limits{'func_extra_in_num'} = 1; # Has function in
- $limits{'limit'} = 1; # supports the limit attribute
- $limits{'unique_index'} = 1; # Unique index works or not
+ $limits{'func_odbc_floor'} = 1; # Has func_odbc_floor function
+ $limits{'func_odbc_mod'} = 1; # Have function mod.
+ $limits{'functions'} = 1; # Has simple functions (+/-)
+ $limits{'group_by_position'} = 1; # Can use 'GROUP BY 1'
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
+ $limits{'group_func_extra_std'} = 1; # Have group function std().
+ $limits{'group_func_sql_min_str'} = 1; # Can execute MIN() and MAX() on strings
+ $limits{'group_functions'} = 1; # Have group functions
+ $limits{'having_with_alias'} = 1; # Can use aliases in HAVING
+ $limits{'having_with_group'} = 1; # Can use group functions in HAVING
+ $limits{'insert_multi_value'} = 1; # Have INSERT ... values (1,2),(3,4)
  $limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
+ $limits{'join_optimizer'} = 1; # Can optimize FROM tables
+ $limits{'left_outer_join'} = 1; # Supports left outer joins
+ $limits{'like_with_column'} = 1; # Can use column1 LIKE column2
+ $limits{'limit'} = 1; # supports the limit attribute
+ $limits{'load_data_infile'} = 1; # Has load data infile
+ $limits{'lock_tables'} = 1; # Has lock tables
+ $limits{'max_column_name'} = 64; # max table and column name
+ $limits{'max_columns'} = 2000; # Max number of columns in table
+ $limits{'max_conditions'} = 9999; # (Actually not a limit)
+ $limits{'max_index'} = 16; # Max number of keys
+ $limits{'max_index_parts'} = 16; # Max segments/key
+ $limits{'max_tables'} = (($machine || '') =~ "^win") ? 5000 : 65000;
+ $limits{'max_text_size'} = 1000000; # Good enough for tests
+ $limits{'multi_drop'} = 1; # Drop table can take many tables
+ $limits{'order_by_position'} = 1; # Can use 'ORDER BY 1'
  $limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 1000000; # Max size with default buffers.
+ $limits{'select_without_from'}= 1; # Can do 'select 1';
+ $limits{'subqueries'} = 0; # Doesn't support sub-queries.
+ $limits{'table_wildcard'} = 1; # Has SELECT table_name.*
+ $limits{'unique_index'} = 1; # Unique index works or not
  $limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
  $smds{'time'} = 1;
  $smds{'q1'} = 'b'; # with time not supp by mysql ('')
@@ -568,12 +564,12 @@ sub new
  $self->{'drop_attr'} = "";
  $self->{"vacuum"} = 1;
  $limits{'join_optimizer'} = 1; # Can optimize FROM tables
- $limits{'load_data_infile'} = 0; # Is this true ?
- $limits{'NEG'} = 1; # Can't handle -id
- $limits{'alter_table'} = 1; # alter ??
+ $limits{'load_data_infile'} = 0;
+ $limits{'NEG'} = 1;
  $limits{'alter_add_multi_col'}= 0; # alter_add_multi_col ?
- $limits{'alter_table_dropcol'}= 0; # alter_drop_col ?
+ $limits{'alter_table'} = 1;
+ $limits{'alter_table_dropcol'}= 0;
  $limits{'column_alias'} = 1;
  $limits{'func_extra_%'} = 1;
  $limits{'func_extra_if'} = 0;
@@ -582,33 +578,33 @@
  $limits{'func_odbc_mod'} = 1; # Has %
  $limits{'functions'} = 1;
  $limits{'group_by_position'} = 1;
+ $limits{'group_distinct_functions'}= 1; # Have count(distinct)
  $limits{'group_func_extra_std'} = 0;
  $limits{'group_func_sql_min_str'}= 1; # Can execute MIN() and MAX() on strings
  $limits{'group_functions'} = 1;
- $limits{'group_distinct_functions'}= 1; # Have count(distinct)
  $limits{'having_with_alias'} = 0;
  $limits{'having_with_group'} = 1;
- $limits{'left_outer_join'} = 0;
+ $limits{'insert_select'} = 1;
+ $limits{'left_outer_join'} = 1;
  $limits{'like_with_column'} = 1;
  $limits{'lock_tables'} = 0; # in ATIS gives this a problem
+ $limits{'max_column_name'} = 128;
+ $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
+ $limits{'max_conditions'} = 9999; # This makes Pg real slow
+ $limits{'max_index'} = 64; # Big enough
+ $limits{'max_index_parts'} = 16;
+ $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
+ $limits{'max_text_size'} = 65000; # Good enough for test
  $limits{'multi_drop'} = 1;
  $limits{'order_by_position'} = 1;
+ $limits{'order_by_unused'} = 1;
+ $limits{'query_size'} = 16777216;
  $limits{'select_without_from'}= 1;
  $limits{'subqueries'} = 1;
  $limits{'table_wildcard'} = 1;
- $limits{'max_column_name'} = 32; # Is this true
- $limits{'max_columns'} = 1000; # 500 crashes pg 6.3
- $limits{'max_tables'} = 5000; # 10000 crashes pg 7.0.2
- $limits{'max_conditions'} = 30; # This makes Pg real slow
- $limits{'max_index'} = 64; # Is this true ?
- $limits{'max_index_parts'} = 16; # Is this true ?
- $limits{'max_text_size'} = 7000; # 8000 crashes pg 6.3
- $limits{'query_size'} = 16777216;
  $limits{'unique_index'} = 1; # Unique index works or not
- $limits{'insert_select'} = 1;
- $limits{'working_blobs'} = 1; # If big varchar/blobs works
- $limits{'order_by_unused'} = 1;
  $limits{'working_all_fields'} = 1;
+ $limits{'working_blobs'} = 1; # If big varchar/blobs works
  # the different cases per query ...
  $smds{'q1'} = 'b'; # with time
@@ -639,7 +635,7 @@ sub new
  sub version
  {
    my ($version,$dir);
-   foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/my/local/pgsql/")
+   foreach $dir ($ENV{'PGDATA'},"/usr/local/pgsql/data", "/usr/local/pg/data")
    {
      if ($dir && -e "$dir/PG_VERSION")
      {
......
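For context on the $limits reshuffle above: server-cfg.sh fills this hash with per-server capability flags (one sub new per server), and the benchmark scripts consult it to decide which tests to run and how large the test data may be. A minimal sketch of that pattern, assuming a hash shaped like the one above; the surrounding code and names are illustrative, not the real run-all-tests API:

#!/usr/bin/perl
use strict;
use warnings;

# Illustrative subset of the capability flags set in server-cfg.sh.
my %limits = (
  'group_distinct_functions' => 1,       # server supports count(distinct)
  'max_text_size'            => 1000000, # raised from 65000 in this commit
);

if ($limits{'group_distinct_functions'})
{
  print "would run the count(distinct) tests\n";
}

# The larger max_text_size is what lets the benchmark blob size grow
# from 65K to 1M, as noted in the commit message.
my $blob_size= $limits{'max_text_size'} >= 1000000 ? 1000000 : 65000;
print "using blobs of $blob_size bytes\n";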
sql-bench/test-connect.sh
@@ -266,7 +266,7 @@ for ($i=0 ; $i < $opt_loop_count ; $i++)
  }
  $end_time=new Benchmark;
- print "Time to select_big ($opt_loop_count): " .
+ print "Time to select_big_str ($opt_loop_count): " .
    timestr(timediff($end_time, $loop_time),"all") . "\n\n";
  $sth = $dbh->do("drop table bench1" . $server->{'drop_attr'})
......
tests/fork_big.pl
@@ -88,6 +88,7 @@ for ($i=0 ; $i < $opt_threads ; $i ++)
  {
    test_select() if (($pid=fork()) == 0); $work{$pid}="select_key";
  }
+ test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";
  test_delete() if (($pid=fork()) == 0); $work{$pid}="delete";
  test_update() if (($pid=fork()) == 0); $work{$pid}="update";
  test_flush() if (($pid=fork()) == 0); $work{$pid}= "flush";
@@ -213,6 +214,35 @@ sub test_select
    exit(0);
  }
+ #
+ # Do big select count(distinct..) over the table
+ #
+ sub test_select_count
+ {
+   my ($dbh, $i, $j, $count, $loop);
+   $dbh = DBI->connect("DBI:mysql:$opt_db:$opt_host",
+                       $opt_user, $opt_password,
+                       { PrintError => 0}) || die $DBI::errstr;
+   $count=0;
+   $i=0;
+   while (!test_if_abort($dbh))
+   {
+     for ($j=0 ; $j < $numtables ; $j++)
+     {
+       my ($table)= $testtables[$j]->[0];
+       simple_query($dbh, "select count(distinct marker),count(distinct id),count(distinct info) from $table");
+       $count++;
+     }
+     sleep(20); # This query is quite slow
+   }
+   $dbh->disconnect; $dbh=0;
+   print "Test_select: Executed $count select count(distinct) queries\n";
+   exit(0);
+ }
  #
  # Delete 1-5 rows from the first 2 tables.
  # Test ends when the number of rows for table 3 didn't change during
......
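The one-line fork idiom added above (test_select_count() if (($pid=fork()) == 0); $work{$pid}="select_count";) is compact but easy to misread: fork() returns 0 in the child, so the child runs the worker (which must end with exit(0)), while the parent gets the child's pid and uses it to record what that child is doing. A small standalone sketch of the same pattern; the worker body is a placeholder, not one of the real fork_big.pl tests:

#!/usr/bin/perl
use strict;
use warnings;

my (%work, $pid);

sub worker
{
  my ($name)= @_;
  print "[$$] running $name\n";
  sleep(1);        # a real worker would run queries until told to stop
  exit(0);         # the child must exit here, or it would fall through
}

# Child: fork() returns 0, so run the worker.  Parent: remember the child's job.
worker("select_count") if (($pid=fork()) == 0); $work{$pid}= "select_count";
worker("delete")       if (($pid=fork()) == 0); $work{$pid}= "delete";

# Parent only: reap the children and report which worker each one was.
while ((my $done= wait()) > 0)
{
  print "child $done finished ($work{$done})\n";
}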