Commit e4e61b09 authored by jonas@perch.ndb.mysql.com

Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0

into  perch.ndb.mysql.com:/home/jonas/src/mysql-5.0
parents 7e0733fe f5c52797
--exec $MYSQL test -e 'show processlist' | grep 'Binlog Dump' | cut -f1 > $MYSQLTEST_VARDIR/tmp/bl_dump_thread_id
--disable_warnings
drop table if exists t999;
--enable_warnings
create temporary table t999 (f int);
--replace_result $MYSQLTEST_VARDIR "."
eval LOAD DATA INFILE "$MYSQLTEST_VARDIR/tmp/bl_dump_thread_id" into table t999;
let $id = `select f from t999`;
drop table t999;
......@@ -465,8 +465,7 @@ sub mtr_kill_leftovers () {
if ( kill(0, @pids) ) # Check if some left
{
# FIXME maybe just mtr_warning() ?
mtr_error("can't kill process(es) " . join(" ", @pids));
mtr_warning("can't kill process(es) " . join(" ", @pids));
}
}
}
......@@ -479,7 +478,7 @@ sub mtr_kill_leftovers () {
{
if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) )
{
mtr_error("can't kill old mysqld holding port $srv->{'port'}");
mtr_warning("can't kill old mysqld holding port $srv->{'port'}");
}
}
}
......
......@@ -312,6 +312,9 @@ our $opt_udiff;
our $opt_skip_ndbcluster;
our $opt_with_ndbcluster;
our $opt_with_ndbcluster_only= 0; # dummy, ignored
our $opt_with_openssl;
our $exe_ndb_mgm;
our $path_ndb_tools_dir;
......@@ -572,6 +575,7 @@ sub command_line_setup () {
'force' => \$opt_force,
'with-ndbcluster' => \$opt_with_ndbcluster,
'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
'with-ndbcluster-only' => \$opt_with_ndbcluster_only,
'do-test=s' => \$opt_do_test,
'suite=s' => \$opt_suite,
'skip-rpl' => \$opt_skip_rpl,
......@@ -677,6 +681,11 @@ sub command_line_setup () {
print '#' x 78, "\n\n";
}
if ( $opt_with_ndbcluster_only )
{
print "# Option '--with-ndbcluster-only' is ignored in this release.\n";
}
foreach my $arg ( @ARGV )
{
if ( $arg =~ /^--skip-/ )
......
......@@ -277,6 +277,7 @@ EXTRA_MYSQLSHOW_OPT=""
EXTRA_MYSQLBINLOG_OPT=""
USE_RUNNING_SERVER=0
USE_NDBCLUSTER=@USE_NDBCLUSTER@
USE_NDBCLUSTER_ONLY=0
USE_RUNNING_NDBCLUSTER=""
USE_PURIFY=""
PURIFY_LOGS=""
......@@ -315,6 +316,8 @@ STRESS_INIT_FILE=""
STRESS_TEST_FILE=""
STRESS_TEST=""
$ECHO "Logging: $0 $*" # To ensure we see all arguments in the output, for the test analysis tool
while test $# -gt 0; do
case "$1" in
--embedded-server)
......@@ -341,6 +344,10 @@ while test $# -gt 0; do
--extern) USE_RUNNING_SERVER=1 ;;
--with-ndbcluster)
USE_NDBCLUSTER="--ndbcluster" ;;
--with-ndbcluster-only)
USE_NDBCLUSTER="--ndbcluster"
USE_NDBCLUSTER_SLAVE="--ndbcluster"
USE_NDBCLUSTER_ONLY=1 ;;
--ndb-connectstring=*)
USE_NDBCLUSTER="--ndbcluster" ;
USE_RUNNING_NDBCLUSTER=`$ECHO "$1" | $SED -e "s;--ndb-connectstring=;;"` ;;
......@@ -1654,6 +1661,11 @@ run_testcase ()
result_file="r/$tname.result"
echo $tname > $CURRENT_TEST
SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0 \& \( $tname : federated \) = 0`
NDBCLUSTER_TEST=`$EXPR \( $tname : '.*ndb.*' \) != 0`
if [ "x$USE_NDBCLUSTER_ONLY" = "x1" -a "x$NDBCLUSTER_TEST" != "x1" ] ; then
skip_test $tname
return
fi
if [ "$USE_MANAGER" = 1 ] ; then
many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)`
fi
......
......@@ -359,3 +359,38 @@ group by s1 collate latin1_swedish_ci having s1 = 'y';
s1 count(s1)
y 1
drop table t1;
DROP SCHEMA IF EXISTS HU;
Warnings:
Note 1008 Can't drop database 'HU'; database doesn't exist
CREATE SCHEMA HU ;
USE HU ;
CREATE TABLE STAFF
(EMPNUM CHAR(3) NOT NULL UNIQUE,
EMPNAME CHAR(20),
GRADE DECIMAL(4),
CITY CHAR(15));
CREATE TABLE PROJ
(PNUM CHAR(3) NOT NULL UNIQUE,
PNAME CHAR(20),
PTYPE CHAR(6),
BUDGET DECIMAL(9),
CITY CHAR(15));
INSERT INTO STAFF VALUES ('E1','Alice',12,'Deale');
INSERT INTO STAFF VALUES ('E2','Betty',10,'Vienna');
INSERT INTO STAFF VALUES ('E3','Carmen',13,'Vienna');
INSERT INTO STAFF VALUES ('E4','Don',12,'Deale');
INSERT INTO STAFF VALUES ('E5','Ed',13,'Akron');
INSERT INTO PROJ VALUES ('P1','MXSS','Design',10000,'Deale');
INSERT INTO PROJ VALUES ('P2','CALM','Code',30000,'Vienna');
INSERT INTO PROJ VALUES ('P3','SDP','Test',30000,'Tampa');
INSERT INTO PROJ VALUES ('P4','SDP','Design',20000,'Deale');
INSERT INTO PROJ VALUES ('P5','IRM','Test',10000,'Vienna');
INSERT INTO PROJ VALUES ('P6','PAYR','Design',50000,'Deale');
SELECT EMPNUM, GRADE*1000
FROM HU.STAFF WHERE GRADE * 1000 >
ANY (SELECT SUM(BUDGET) FROM HU.PROJ
GROUP BY CITY, PTYPE
HAVING HU.PROJ.CITY = HU.STAFF.CITY);
EMPNUM GRADE*1000
E3 13000
DROP SCHEMA HU;
......@@ -89,3 +89,17 @@ f
7
drop table t1,t2;
create temporary table t3 (f int);
create temporary table t4 (f int);
create table t5 (f int);
drop table if exists t999;
create temporary table t999 (f int);
LOAD DATA INFILE "./tmp/bl_dump_thread_id" into table t999;
drop table t999;
insert into t4 values (1);
kill `select id from information_schema.processlist where command='Binlog Dump'`;
insert into t5 select * from t4;
select * from t5 /* must be 1 after reconnection */;
f
1
drop temporary table t4;
drop table t5;
......@@ -347,3 +347,47 @@ group by s1 collate latin1_swedish_ci having s1 = 'y';
# MySQL returns: 1 row, with count(s1) = 1
drop table t1;
#
# Bug #15917: unexpected complaint for a name in having clause
# when the server is run on Windows or with --lower-case-table-names=1
#
DROP SCHEMA IF EXISTS HU;
CREATE SCHEMA HU ;
USE HU ;
CREATE TABLE STAFF
(EMPNUM CHAR(3) NOT NULL UNIQUE,
EMPNAME CHAR(20),
GRADE DECIMAL(4),
CITY CHAR(15));
CREATE TABLE PROJ
(PNUM CHAR(3) NOT NULL UNIQUE,
PNAME CHAR(20),
PTYPE CHAR(6),
BUDGET DECIMAL(9),
CITY CHAR(15));
INSERT INTO STAFF VALUES ('E1','Alice',12,'Deale');
INSERT INTO STAFF VALUES ('E2','Betty',10,'Vienna');
INSERT INTO STAFF VALUES ('E3','Carmen',13,'Vienna');
INSERT INTO STAFF VALUES ('E4','Don',12,'Deale');
INSERT INTO STAFF VALUES ('E5','Ed',13,'Akron');
INSERT INTO PROJ VALUES ('P1','MXSS','Design',10000,'Deale');
INSERT INTO PROJ VALUES ('P2','CALM','Code',30000,'Vienna');
INSERT INTO PROJ VALUES ('P3','SDP','Test',30000,'Tampa');
INSERT INTO PROJ VALUES ('P4','SDP','Design',20000,'Deale');
INSERT INTO PROJ VALUES ('P5','IRM','Test',10000,'Vienna');
INSERT INTO PROJ VALUES ('P6','PAYR','Design',50000,'Deale');
SELECT EMPNUM, GRADE*1000
FROM HU.STAFF WHERE GRADE * 1000 >
ANY (SELECT SUM(BUDGET) FROM HU.PROJ
GROUP BY CITY, PTYPE
HAVING HU.PROJ.CITY = HU.STAFF.CITY);
DROP SCHEMA HU;
......@@ -129,6 +129,31 @@ drop table t1,t2;
create temporary table t3 (f int);
sync_with_master;
#
# Bug#17284 erroneous temp table cleanup on slave
#
connection master;
create temporary table t4 (f int);
create table t5 (f int);
sync_with_master;
# find dumper's $id
source include/get_binlog_dump_thread_id.inc;
insert into t4 values (1);
# a hint how to do that in 5.1
--replace_result $id "`select id from information_schema.processlist where command='Binlog Dump'`"
eval kill $id; # to stimulate reconnection by slave w/o timeout
insert into t5 select * from t4;
save_master_pos;
connection slave;
sync_with_master;
select * from t5 /* must be 1 after reconnection */;
connection master;
drop temporary table t4;
drop table t5;
# The server will now close down
# End of 4.1 tests
# End of 5.0 tests
......@@ -3054,6 +3054,7 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
int found_match_degree= 0;
Item_ident *cur_field;
int cur_match_degree= 0;
char name_buff[NAME_LEN+1];
if (find_item->type() == Item::FIELD_ITEM ||
find_item->type() == Item::REF_ITEM)
......@@ -3065,6 +3066,14 @@ static Item** find_field_in_group_list(Item *find_item, ORDER *group_list)
else
return NULL;
if (db_name && lower_case_table_names)
{
/* Convert database to lower case for comparison */
strmake(name_buff, db_name, sizeof(name_buff)-1);
my_casedn_str(files_charset_info, name_buff);
db_name= name_buff;
}
DBUG_ASSERT(field_name != 0);
for (ORDER *cur_group= group_list ; cur_group ; cur_group= cur_group->next)
......
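For context on the find_field_in_group_list() change above: when lower_case_table_names is set, the qualifying database name is copied into a local buffer and lower-cased before it is compared against the GROUP BY columns, which is what lets the HU.PROJ.CITY / HU.STAFF.CITY references in the new having test resolve (Bug #15917). A minimal standalone sketch of that normalization, using plain tolower() in place of the server's strmake()/my_casedn_str() helpers; the buffer size is a stand-in for NAME_LEN:

#include <cctype>
#include <cstdio>
#include <cstring>

/*
  Copy db_name into name_buff (always NUL-terminated) and lower-case it,
  mirroring the strmake() + my_casedn_str() sequence in the hunk above.
  Plain tolower() stands in for the server's charset-aware helper.
*/
static const char *normalize_db_name(const char *db_name, char *name_buff,
                                     size_t buff_size, bool lower_case_table_names)
{
  if (db_name == NULL || !lower_case_table_names)
    return db_name;                              /* use the name as given */
  strncpy(name_buff, db_name, buff_size - 1);
  name_buff[buff_size - 1] = '\0';
  for (char *p = name_buff; *p; p++)
    *p = (char) tolower((unsigned char) *p);
  return name_buff;                              /* comparisons now see "hu", not "HU" */
}

int main()
{
  char name_buff[64 + 1];                        /* 64 is a stand-in for NAME_LEN */
  printf("%s\n", normalize_db_name("HU", name_buff, sizeof(name_buff), true));
  return 0;
}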
......@@ -1882,7 +1882,11 @@ DBUG_skip_commit:
rotate binlog, if necessary.
*/
if (commit_event->get_type_code() == XID_EVENT)
thread_safe_increment(prepared_xids, &LOCK_prep_xids);
{
pthread_mutex_lock(&LOCK_prep_xids);
prepared_xids++;
pthread_mutex_unlock(&LOCK_prep_xids);
}
else
rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED);
}
......
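The hunk above swaps thread_safe_increment() for an explicit lock/increment/unlock of prepared_xids under LOCK_prep_xids, so the increment is serialized with any other code holding that mutex. A small self-contained sketch of the pattern; the names are placeholders, not the server's globals:

#include <pthread.h>
#include <cstdio>

static pthread_mutex_t LOCK_prep_xids_demo = PTHREAD_MUTEX_INITIALIZER;
static unsigned long   prepared_xids_demo  = 0;

/* Count one more prepared XID under the mutex instead of with an atomic add. */
static void count_prepared_xid()
{
  pthread_mutex_lock(&LOCK_prep_xids_demo);
  prepared_xids_demo++;                    /* protected by the mutex */
  pthread_mutex_unlock(&LOCK_prep_xids_demo);
}

int main()
{
  count_prepared_xid();
  printf("prepared_xids_demo = %lu\n", prepared_xids_demo);  /* prints 1 */
  return 0;
}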
......@@ -830,6 +830,11 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
flags.sql_mode,
flags.max_sort_length,
flags.group_concat_max_len));
/*
Make InnoDB release the adaptive hash index latch before
acquiring the query cache mutex.
*/
ha_release_temporary_latches(thd);
STRUCT_LOCK(&structure_guard_mutex);
if (query_cache_size == 0)
......
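The added ha_release_temporary_latches() call above enforces a lock ordering: the InnoDB adaptive hash index latch is given back before the query cache's guard mutex is acquired, so a thread never sleeps on the mutex while still holding the latch. A toy sketch of that ordering with plain pthread primitives; all names here are illustrative, not the server's:

#include <pthread.h>

static pthread_rwlock_t adaptive_hash_latch_demo = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  query_cache_mutex_demo   = PTHREAD_MUTEX_INITIALIZER;

static void store_query_in_cache_demo()
{
  /* The latch would normally still be held from reading rows. */
  pthread_rwlock_rdlock(&adaptive_hash_latch_demo);

  pthread_rwlock_unlock(&adaptive_hash_latch_demo); /* 1) give the latch back first     */
  pthread_mutex_lock(&query_cache_mutex_demo);      /* 2) only then take the cache mutex */
  /* ... insert the result block into the cache ... */
  pthread_mutex_unlock(&query_cache_mutex_demo);
}

int main()
{
  store_query_in_cache_demo();
  return 0;
}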
......@@ -919,14 +919,12 @@ bool select_send::send_data(List<Item> &items)
return 0;
}
#ifdef HAVE_INNOBASE_DB
/*
We may be passing the control from mysqld to the client: release the
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
by thd
*/
ha_release_temporary_latches(thd);
#endif
List_iterator_fast<Item> li(items);
Protocol *protocol= thd->protocol;
......@@ -956,12 +954,10 @@ bool select_send::send_data(List<Item> &items)
bool select_send::send_eof()
{
#ifdef HAVE_INNOBASE_DB
/* We may be passing the control from mysqld to the client: release the
InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved
by thd */
ha_release_temporary_latches(thd);
#endif
/* Unlock tables before sending packet to gain some speed */
if (thd->lock)
......
......@@ -445,9 +445,8 @@ Sensitive_cursor::fetch(ulong num_rows)
if (error == NESTED_LOOP_CURSOR_LIMIT)
join->resume_nested_loop= TRUE;
#ifdef USING_TRANSACTIONS
ha_release_temporary_latches(thd);
#endif
/* Grab free_list here to correctly free it in close */
thd->restore_active_arena(this, &backup_arena);
......
......@@ -466,6 +466,12 @@ impossible position";
(rli->group_master_log_pos)
*/
int4store((char*) packet->ptr()+LOG_POS_OFFSET+1, 0);
/*
If the slave reconnects, the master sends the FD event with `created'
set to 0 to avoid destroying the slave's temporary tables.
*/
int4store((char*) packet->ptr()+LOG_EVENT_MINIMAL_HEADER_LEN+
ST_CREATED_OFFSET+1, (ulong) 0);
/* send it */
if (my_net_write(net, (char*)packet->ptr(), packet->length()))
{
......
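The added int4store() call above overwrites the 4-byte `created' field inside the already-serialized Format_description event with 0, so a reconnecting slave does not treat the dump as a server restart and drop its temporary tables (cf. Bug#17284 above). A standalone sketch of the byte-patching idea; the offsets below are illustrative placeholders, not the real LOG_EVENT_MINIMAL_HEADER_LEN / ST_CREATED_OFFSET values:

#include <cstdint>
#include <cstdio>
#include <cstring>

/* Little-endian 4-byte store, the same idea as the server's int4store() macro. */
static void int4store_demo(unsigned char *pos, uint32_t v)
{
  pos[0] = (unsigned char) (v);
  pos[1] = (unsigned char) (v >> 8);
  pos[2] = (unsigned char) (v >> 16);
  pos[3] = (unsigned char) (v >> 24);
}

int main()
{
  /* Pretend this buffer holds the serialized event; the byte that precedes the
     event data in the packet is what the "+1" in the hunk above accounts for. */
  unsigned char packet[64];
  memset(packet, 0xAA, sizeof(packet));

  const size_t HEADER_LEN_DEMO  = 19;  /* placeholder for LOG_EVENT_MINIMAL_HEADER_LEN */
  const size_t CREATED_OFF_DEMO = 4;   /* placeholder for ST_CREATED_OFFSET */
  unsigned char *created = packet + 1 + HEADER_LEN_DEMO + CREATED_OFF_DEMO;

  int4store_demo(created, 0);          /* created := 0, "not a server restart" */

  printf("created bytes: %02x %02x %02x %02x\n",
         created[0], created[1], created[2], created[3]);
  return 0;
}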