Commit df5f8c18 authored by monty@hundin.mysql.fi

Move HA_EXTRA_NO_READCHECK to ha_open

Fixed bug in multi-table-delete
parent bea12d76
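
For reference, the multi-table-delete case this commit fixes is the one exercised by the regression test added further down in this diff; a minimal reproduction, assuming the BDB handler is compiled in, looks like:

  create table t1 (a int not null, b int, primary key (a)) type=bdb;
  create table t2 (a int not null, b int, primary key (a)) type=bdb;
  insert into t1 values (2,3),(1,7),(10,7);
  insert into t2 values (2,3),(1,7),(10,7);
  # with the fix, the delete through the join leaves both tables empty
  delete t1, t2 from t1, t2 where t1.a = t2.a;
  drop table t1,t2;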
......@@ -48135,6 +48135,13 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.
@itemize @bullet
@item
Fixed bug in multi table delete.
@item
Fixed bug in @code{SELECT CONCAT(argument-list) ... GROUP BY 1}.
@item
@code{INSERT ... SELECT} did a full rollback in case of an error. Fixed
so that we only roll back the last statement.
@item
Fixed bug with empty expression for boolean fulltext search.
@item
Fixed core dump bug in updating fulltext key from/to @code{NULL}.
......@@ -108,6 +108,7 @@ enum enum_server_command {COM_SLEEP,COM_QUIT,COM_INIT_DB,COM_QUERY,
struct st_vio; /* Only C */
typedef struct st_vio Vio;
#define MAX_CHAR_WIDTH 255 // Max length for a CHAR column
#define MAX_BLOB_WIDTH 8192 // Default width for blob
typedef struct st_net {
......
......@@ -1101,3 +1101,25 @@ INFO_NOTE
select INFO_NOTE from t1 where STR_DATE > '20010610';
INFO_NOTE
drop table t1;
create table t1 (a int not null, b int, primary key (a)) type =bdb;
create table t2 (a int not null, b int, primary key (a)) type =bdb;
insert into t1 values (2, 3),(1, 7),(10, 7);
insert into t2 values (2, 3),(1, 7),(10, 7);
select * from t1;
a b
1 7
2 3
10 7
select * from t2;
a b
1 7
2 3
10 7
delete t1, t2 from t1, t2 where t1.a = t2.a;
select * from t1;
a b
select * from t2;
a b
select * from t2;
a b
drop table t1,t2;
......@@ -344,3 +344,30 @@ a 1
b 1
SET SQL_BIG_TABLES=0;
drop table t1;
CREATE TABLE t1 (
`a` char(193) default NULL,
`b` char(63) default NULL
);
INSERT INTO t1 VALUES ('abc','def'),('hij','klm');
SELECT CONCAT(a, b) FROM t1 GROUP BY 1;
CONCAT(a, b)
abcdef
hijklm
SELECT CONCAT(a, b),count(*) FROM t1 GROUP BY 1;
CONCAT(a, b) count(*)
abcdef 1
hijklm 1
SELECT CONCAT(a, b),count(distinct a) FROM t1 GROUP BY 1;
CONCAT(a, b) count(distinct a)
abcdef 1
hijklm 1
SELECT 1 FROM t1 GROUP BY CONCAT(a, b);
1
1
1
INSERT INTO t1 values ('hij','klm');
SELECT CONCAT(a, b),count(*) FROM t1 GROUP BY 1;
CONCAT(a, b) count(*)
abcdef 1
hijklm 2
DROP TABLE t1;
......@@ -767,3 +767,19 @@ select INFO_NOTE from t1 where STR_DATE = '20010610';
select INFO_NOTE from t1 where STR_DATE < '20010610';
select INFO_NOTE from t1 where STR_DATE > '20010610';
drop table t1;
#
# Test problem with multi table delete which quickly shows up with bdb tables.
#
create table t1 (a int not null, b int, primary key (a)) type =bdb;
create table t2 (a int not null, b int, primary key (a)) type =bdb;
insert into t1 values (2, 3),(1, 7),(10, 7);
insert into t2 values (2, 3),(1, 7),(10, 7);
select * from t1;
select * from t2;
delete t1, t2 from t1, t2 where t1.a = t2.a;
select * from t1;
select * from t2;
select * from t2;
drop table t1,t2;
......@@ -266,3 +266,20 @@ SELECT binary a FROM t1 GROUP BY 1;
SELECT binary a,count(*) FROM t1 GROUP BY 1;
SET SQL_BIG_TABLES=0;
drop table t1;
#
# Test of key >= 256 bytes
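# (char(193) + char(63) concatenate to a 256-byte group key, at or above the
#  255-byte MAX_CHAR_WIDTH limit, so the temporary table needs a unique constraint)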
#
CREATE TABLE t1 (
`a` char(193) default NULL,
`b` char(63) default NULL
);
INSERT INTO t1 VALUES ('abc','def'),('hij','klm');
SELECT CONCAT(a, b) FROM t1 GROUP BY 1;
SELECT CONCAT(a, b),count(*) FROM t1 GROUP BY 1;
SELECT CONCAT(a, b),count(distinct a) FROM t1 GROUP BY 1;
SELECT 1 FROM t1 GROUP BY CONCAT(a, b);
INSERT INTO t1 values ('hij','klm');
SELECT CONCAT(a, b),count(*) FROM t1 GROUP BY 1;
DROP TABLE t1;
......@@ -1596,12 +1596,13 @@ int ha_berkeley::rnd_pos(byte * buf, byte *pos)
{
DBT db_pos;
statistic_increment(ha_read_rnd_count,&LOCK_status);
DBUG_ENTER("ha_berkeley::rnd_pos");
active_index= (uint) -1; // Don't delete via cursor
return read_row(file->get(file, transaction,
DBUG_RETURN(read_row(file->get(file, transaction,
get_pos(&db_pos, pos),
&current_row, 0),
(char*) buf, primary_key, &current_row, (DBT*) 0, 0);
(char*) buf, primary_key, &current_row, (DBT*) 0, 0));
}
void ha_berkeley::position(const byte *record)
......
......@@ -425,9 +425,8 @@ int handler::ha_open(const char *name, int mode, int test_if_locked)
{
if (table->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
table->db_stat|=HA_READ_ONLY;
}
if (!error)
{
(void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
if (!alloc_root_inited(&table->mem_root)) // If temporary table
ref=(byte*) sql_alloc(ALIGN_SIZE(ref_length)*2);
else
......
......@@ -86,6 +86,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table,
}
else if (table->record_pointers)
{
DBUG_PRINT("info",("using record_pointers"));
table->file->rnd_init(0);
info->cache_pos=table->record_pointers;
info->cache_end=info->cache_pos+ table->found_records*info->ref_length;
......
......@@ -890,7 +890,7 @@ int collect_string(String *element,
int collect_real(double *element, element_count count __attribute__((unused)),
TREE_INFO *info)
{
char buff[255];
char buff[MAX_FIELD_WIDTH];
String s(buff, sizeof(buff));
if (info->found)
......@@ -909,7 +909,7 @@ int collect_longlong(longlong *element,
element_count count __attribute__((unused)),
TREE_INFO *info)
{
char buff[255];
char buff[MAX_FIELD_WIDTH];
String s(buff, sizeof(buff));
if (info->found)
......@@ -928,7 +928,7 @@ int collect_ulonglong(ulonglong *element,
element_count count __attribute__((unused)),
TREE_INFO *info)
{
char buff[255];
char buff[MAX_FIELD_WIDTH];
String s(buff, sizeof(buff));
if (info->found)
......
......@@ -300,6 +300,7 @@ static void free_cache_entry(TABLE *table)
void free_io_cache(TABLE *table)
{
DBUG_ENTER("free_io_cache");
if (table->io_cache)
{
close_cached_file(table->io_cache);
......@@ -311,6 +312,7 @@ void free_io_cache(TABLE *table)
my_free((gptr) table->record_pointers,MYF(0));
table->record_pointers=0;
}
DBUG_VOID_RETURN;
}
/* Close all tables which aren't in use by any thread */
......@@ -1301,7 +1303,6 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db,
if (error)
goto err;
}
(void) entry->file->extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
......@@ -1499,7 +1500,6 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db,
DBUG_RETURN(0);
}
tmp_table->file->extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL
tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked
tmp_table->tmp_table = (tmp_table->file->has_transactions() ?
TRANSACTIONAL_TMP_TABLE : TMP_TABLE);
......
......@@ -98,7 +98,6 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
DBUG_RETURN(1);
}
}
(void) table->file->extra(HA_EXTRA_NO_READCHECK);
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_QUICK);
......@@ -157,8 +156,7 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, ORDER *order,
}
thd->proc_info="end";
end_read_record(&info);
/* if (order) free_io_cache(table); */ /* QQ Should not be needed */
(void) table->file->extra(HA_EXTRA_READCHECK);
free_io_cache(table); // Will not do any harm
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
......@@ -219,15 +217,11 @@ multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt,
not_trans_safe=false;
tempfiles = (Unique **) sql_calloc(sizeof(Unique *) * (num_of_tables-1));
(void) dt->table->file->extra(HA_EXTRA_NO_READCHECK);
/* Don't use key read with MULTI-TABLE-DELETE */
(void) dt->table->file->extra(HA_EXTRA_NO_KEYREAD);
dt->table->used_keys=0;
for (dt=dt->next ; dt ; dt=dt->next,counter++)
{
TABLE *table=dt->table;
(void) table->file->extra(HA_EXTRA_NO_READCHECK);
(void) table->file->extra(HA_EXTRA_NO_KEYREAD);
table->used_keys=0;
tempfiles[counter] = new Unique (refposcmp2,
(void *) &table->file->ref_length,
......@@ -291,13 +285,12 @@ multi_delete::initialize_tables(JOIN *join)
multi_delete::~multi_delete()
{
/* Add back EXTRA_READCHECK; In 4.0.1 we shouldn't need this anymore */
for (table_being_deleted=delete_tables ;
table_being_deleted ;
table_being_deleted=table_being_deleted->next)
{
TABLE *t=table_being_deleted->table;
(void) t->file->extra(HA_EXTRA_READCHECK);
free_io_cache(t); // Alloced by unique
t->no_keyread=0;
}
......@@ -353,19 +346,17 @@ void multi_delete::send_error(uint errcode,const char *err)
/* First send error what ever it is ... */
::send_error(&thd->net,errcode,err);
/* reset used flags */
// delete_tables->table->no_keyread=0;
/* If nothing deleted return */
if (!deleted)
return;
/* Below can happen when thread is killed early ... */
if (!table_being_deleted)
table_being_deleted=delete_tables;
/*
If rows from the first table only has been deleted and it is transactional,
just do rollback.
If rows from the first table only has been deleted and it is
transactional, just do rollback.
The same if all tables are transactional, regardless of where we are.
In all other cases do attempt deletes ...
*/
......@@ -411,27 +402,6 @@ int multi_delete::do_deletes (bool from_send_error)
break;
}
#if USE_REGENERATE_TABLE
// nice little optimization ....
// but Monty has to fix generate_table...
// This will not work for transactional tables because for other types
// records is not absolute
if (num_of_positions == table->file->records)
{
TABLE_LIST table_list;
bzero((char*) &table_list,sizeof(table_list));
table_list.name=table->table_name;
table_list.real_name=table_being_deleted->real_name;
table_list.table=table;
table_list.grant=table->grant;
table_list.db = table_being_deleted->db;
error=generate_table(thd,&table_list,(TABLE *)0);
if (error <= 0) {error = 1; break;}
deleted += num_of_positions;
continue;
}
#endif /* USE_REGENERATE_TABLE */
READ_RECORD info;
init_read_record(&info,thd,table,NULL,0,0);
while (!(error=info.read_record(&info)) &&
......@@ -452,15 +422,19 @@ int multi_delete::do_deletes (bool from_send_error)
}
/*
return: 0 success
1 error
*/
bool multi_delete::send_eof()
{
thd->proc_info="deleting from reference tables"; /* out: 1 if error, 0 if success */
thd->proc_info="deleting from reference tables";
/* Does deletes for the last n - 1 tables, returns 0 if ok */
int error = do_deletes(false); /* do_deletes returns 0 if success */
int error = do_deletes(false); // returns 0 if success
/* reset used flags */
// delete_tables->table->no_keyread=0; // Will stay in comment until Monty approves changes
thd->proc_info="end";
if (error)
{
......@@ -468,11 +442,12 @@ bool multi_delete::send_eof()
return 1;
}
/* Write the SQL statement to the binlog if we deleted
/*
Write the SQL statement to the binlog if we deleted
rows and we succeeded, or also in an error case when there
was a non-transaction-safe table involved, since
modifications in it cannot be rolled back. */
modifications in it cannot be rolled back.
*/
if (deleted || not_trans_safe)
{
mysql_update_log.write(thd,thd->query,thd->query_length);
......
......@@ -3447,7 +3447,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!param->quick_group)
group=0; // Can't use group key
else for (ORDER *tmp=group ; tmp ; tmp=tmp->next)
{
(*tmp->item)->marker=4; // Store null in key
if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH)
using_unique_constraint=1;
}
if (param->group_length >= MAX_BLOB_WIDTH)
using_unique_constraint=1;
if (group)
......@@ -3852,7 +3856,6 @@ static bool open_tmp_table(TABLE *table)
return(1);
}
/* VOID(ha_lock(table,F_WRLCK)); */ /* Single thread table */
(void) table->file->extra(HA_EXTRA_NO_READCHECK); /* Not needed */
(void) table->file->extra(HA_EXTRA_QUICK); /* Faster */
return(0);
}
......@@ -5651,7 +5654,6 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
DBUG_ENTER("remove_duplicates");
entry->reginfo.lock_type=TL_WRITE;
entry->file->extra(HA_EXTRA_NO_READCHECK);
/* Calculate how many saved fields there is in list */
field_count=0;
......
......@@ -257,8 +257,6 @@ int mysql_update(THD *thd,
}
}
if (!(test_flags & TEST_READCHECK)) /* For debugging */
VOID(table->file->extra(HA_EXTRA_NO_READCHECK));
if (handle_duplicates == DUP_IGNORE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
init_read_record(&info,thd,table,select,0,1);
......@@ -303,7 +301,6 @@ int mysql_update(THD *thd,
}
end_read_record(&info);
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_READCHECK));
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
table->time_stamp=save_time_stamp; // Restore auto timestamp pointer
using_transactions=table->file->has_transactions();
......@@ -362,7 +359,6 @@ multi_update::multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> &fs,
for (TABLE_LIST *dt=ut ; dt ; dt=dt->next,counter++)
{
TABLE *table=ut->table;
(void) ut->table->file->extra(HA_EXTRA_NO_READCHECK);
(void) ut->table->file->extra(HA_EXTRA_NO_KEYREAD);
dt->table->used_keys=0;
if (table->timestamp_field)
......@@ -521,14 +517,12 @@ multi_update::initialize_tables(JOIN *join)
multi_update::~multi_update()
{
/* Add back EXTRA_READCHECK; In 4.0.1 we shouldn't need this anymore */
int counter = 0;
for (table_being_updated=update_tables ;
table_being_updated ;
counter++, table_being_updated=table_being_updated->next)
{
TABLE *table=table_being_updated->table;
(void)table->file->extra(HA_EXTRA_READCHECK);
if (error)
table->time_stamp=save_time_stamps[counter];
}
......