Commit 384ace38 authored by unknown

Merge chilla.local:/home/mydev/mysql-5.0
into chilla.local:/home/mydev/mysql-5.0-bug20719


myisam/mi_dynrec.c:
  Auto merged
ndb/include/kernel/GlobalSignalNumbers.h:
  Auto merged
ndb/src/kernel/blocks/dbdict/Dbdict.cpp:
  Auto merged
ndb/src/kernel/blocks/dbdict/Dbdict.hpp:
  Auto merged
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
  Auto merged
ndb/src/ndbapi/ndberror.c:
  Auto merged
sql/ha_ndbcluster.cc:
  Auto merged
sql/handler.h:
  Auto merged
sql/sql_base.cc:
  Auto merged
sql/sql_insert.cc:
  Auto merged
parents 034522f4 9493d464
@@ -1130,12 +1130,41 @@ void _my_store_blob_length(byte *pos,uint pack_length,uint length)
}
/* Read record from datafile */
/* Returns 0 if ok, -1 if error */
/*
  Read record from datafile.

  SYNOPSIS
    _mi_read_dynamic_record()
      info      MI_INFO pointer to table.
      filepos   From where to read the record.
      buf       Destination for record.

  NOTE
    If a write buffer is active, it needs to be flushed if its contents
    intersect with the record to read. We always check if the position
    of the first byte of the write buffer is lower than the position
    past the last byte to read. In theory this is also true if the write
    buffer lies completely below the read segment, that is, if there is
    no intersection. But this case is unusual, so we flush anyway. Only
    if the first byte in the write buffer is above the last byte to read
    do we skip the flush.

    A dynamic record may need several reads, so this check must be done
    before every read. Reading a dynamic record starts with reading the
    block header. That read also fetches the first bytes of the block
    data into the header buffer; if the block data does not fit into
    this prefetched space, a second read is necessary. These one or two
    reads repeat for every part of the record.

  RETURN
    0   OK
    -1  Error
*/
int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
{
int flag;
int block_of_record;
uint b_type,left_length;
byte *to;
MI_BLOCK_INFO block_info;
@@ -1147,20 +1176,19 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
LINT_INIT(to);
LINT_INIT(left_length);
file=info->dfile;
block_info.next_filepos=filepos; /* for easyer loop */
flag=block_info.second_read=0;
block_of_record= 0; /* First block of record is numbered as zero. */
block_info.second_read= 0;
do
{
/* A corrupted table can have wrong pointers. (Bug# 19835) */
if (filepos == HA_OFFSET_ERROR)
goto panic;
if (info->opt_flag & WRITE_CACHE_USED &&
info->rec_cache.pos_in_file <= block_info.next_filepos &&
info->rec_cache.pos_in_file < filepos + MI_BLOCK_INFO_HEADER_LENGTH &&
flush_io_cache(&info->rec_cache))
goto err;
/* A corrupted table can have wrong pointers. (Bug# 19835) */
if (block_info.next_filepos == HA_OFFSET_ERROR)
goto panic;
info->rec_cache.seek_not_done=1;
if ((b_type=_mi_get_block_info(&block_info,file,
block_info.next_filepos))
if ((b_type= _mi_get_block_info(&block_info, file, filepos))
& (BLOCK_DELETED | BLOCK_ERROR | BLOCK_SYNC_ERROR |
BLOCK_FATAL_ERROR))
{
@@ -1168,9 +1196,8 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
my_errno=HA_ERR_RECORD_DELETED;
goto err;
}
if (flag == 0) /* First block */
if (block_of_record++ == 0) /* First block */
{
flag=1;
if (block_info.rec_len > (uint) info->s->base.max_pack_length)
goto panic;
if (info->s->base.blobs)
@@ -1185,11 +1212,35 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
}
if (left_length < block_info.data_len || ! block_info.data_len)
goto panic; /* Wrong linked record */
if (my_pread(file,(byte*) to,block_info.data_len,block_info.filepos,
MYF(MY_NABP)))
goto panic;
left_length-=block_info.data_len;
to+=block_info.data_len;
/* copy information that is already read */
{
uint offset= (uint) (block_info.filepos - filepos);
uint prefetch_len= (sizeof(block_info.header) - offset);
filepos+= sizeof(block_info.header);
if (prefetch_len > block_info.data_len)
prefetch_len= block_info.data_len;
if (prefetch_len)
{
memcpy((byte*) to, block_info.header + offset, prefetch_len);
block_info.data_len-= prefetch_len;
left_length-= prefetch_len;
to+= prefetch_len;
}
}
/* read rest of record from file */
if (block_info.data_len)
{
if (info->opt_flag & WRITE_CACHE_USED &&
info->rec_cache.pos_in_file < filepos + block_info.data_len &&
flush_io_cache(&info->rec_cache))
goto err;
if (my_read(file, (byte*) to, block_info.data_len, MYF(MY_NABP)))
goto panic;
left_length-=block_info.data_len;
to+=block_info.data_len;
}
filepos= block_info.next_filepos;
} while (left_length);
info->update|= HA_STATE_AKTIV; /* We have an active record */
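/*
  Editor's aside: the flush rule from the NOTE above, reduced to a
  standalone sketch. The names and plain C types are illustrative
  assumptions, not the actual MyISAM structures.
*/
#include <stdio.h>

/*
  Return 1 if a write buffer whose first byte sits at cache_pos may
  intersect the read segment [read_pos, read_pos + read_len) and must
  therefore be flushed before reading.
*/
static int must_flush_write_cache(unsigned long long cache_pos,
                                  unsigned long long read_pos,
                                  unsigned long long read_len)
{
  /*
    Flush unless the first cached byte lies past the last byte to read.
    As the NOTE says, this also flushes when the cache is entirely below
    the read segment (no real intersection), which is harmless.
  */
  return cache_pos < read_pos + read_len;
}

int main(void)
{
  printf("%d\n", must_flush_write_cache(100, 50, 40)); /* 0: cache starts above read end */
  printf("%d\n", must_flush_write_cache(60, 50, 40));  /* 1: possible overlap, flush */
  return 0;
}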
@@ -1346,11 +1397,45 @@ err:
}
/*
  Read record from datafile.

  SYNOPSIS
    _mi_read_rnd_dynamic_record()
      info                 MI_INFO pointer to table.
      buf                  Destination for record.
      filepos              From where to read the record.
      skip_deleted_blocks  Whether to repeat reading until a non-deleted
                           record is found.

  NOTE
    If a write buffer is active, it needs to be flushed if its contents
    intersect with the record to read. We always check if the position
    of the first byte of the write buffer is lower than the position
    past the last byte to read. In theory this is also true if the write
    buffer lies completely below the read segment, that is, if there is
    no intersection. But this case is unusual, so we flush anyway. Only
    if the first byte in the write buffer is above the last byte to read
    do we skip the flush.

    A dynamic record may need several reads, so this check must be done
    before every read. Reading a dynamic record starts with reading the
    block header. That read also fetches the first bytes of the block
    data into the header buffer; if the block data does not fit into
    this prefetched space, a second read is necessary. These one or two
    reads repeat for every part of the record.

  RETURN
    0     OK
    != 0  Error
*/
int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
register my_off_t filepos,
my_bool skip_deleted_blocks)
{
int flag,info_read,save_errno;
int block_of_record, info_read, save_errno;
uint left_len,b_type;
byte *to;
MI_BLOCK_INFO block_info;
@@ -1376,7 +1461,8 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
else
info_read=1; /* memory-keyinfoblock is ok */
flag=block_info.second_read=0;
block_of_record= 0; /* First block of record is numbered as zero. */
block_info.second_read= 0;
left_len=1;
do
{
@@ -1399,15 +1485,15 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
{
if (_mi_read_cache(&info->rec_cache,(byte*) block_info.header,filepos,
sizeof(block_info.header),
(!flag && skip_deleted_blocks ? READING_NEXT : 0) |
READING_HEADER))
(!block_of_record && skip_deleted_blocks ?
READING_NEXT : 0) | READING_HEADER))
goto panic;
b_type=_mi_get_block_info(&block_info,-1,filepos);
}
else
{
if (info->opt_flag & WRITE_CACHE_USED &&
info->rec_cache.pos_in_file <= filepos &&
info->rec_cache.pos_in_file < filepos + MI_BLOCK_INFO_HEADER_LENGTH &&
flush_io_cache(&info->rec_cache))
DBUG_RETURN(my_errno);
info->rec_cache.seek_not_done=1;
@@ -1432,7 +1518,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
}
goto err;
}
if (flag == 0) /* First block */
if (block_of_record == 0) /* First block */
{
if (block_info.rec_len > (uint) share->base.max_pack_length)
goto panic;
@@ -1465,7 +1551,7 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
left_len-=tmp_length;
to+=tmp_length;
filepos+=tmp_length;
}
}
}
/* read rest of record from file */
if (block_info.data_len)
@@ -1474,11 +1560,17 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
{
if (_mi_read_cache(&info->rec_cache,(byte*) to,filepos,
block_info.data_len,
(!flag && skip_deleted_blocks) ? READING_NEXT :0))
(!block_of_record && skip_deleted_blocks) ?
READING_NEXT : 0))
goto panic;
}
else
{
if (info->opt_flag & WRITE_CACHE_USED &&
info->rec_cache.pos_in_file <
block_info.filepos + block_info.data_len &&
flush_io_cache(&info->rec_cache))
goto err;
/* VOID(my_seek(info->dfile,filepos,MY_SEEK_SET,MYF(0))); */
if (my_read(info->dfile,(byte*) to,block_info.data_len,MYF(MY_NABP)))
{
@@ -1488,10 +1580,14 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, byte *buf,
}
}
}
if (flag++ == 0)
/*
Increment block-of-record counter. If it was the first block,
remember the position behind the block for the next call.
*/
if (block_of_record++ == 0)
{
info->nextpos=block_info.filepos+block_info.block_len;
skip_deleted_blocks=0;
info->nextpos= block_info.filepos + block_info.block_len;
skip_deleted_blocks= 0;
}
left_len-=block_info.data_len;
to+=block_info.data_len;
@@ -1523,6 +1619,11 @@ uint _mi_get_block_info(MI_BLOCK_INFO *info, File file, my_off_t filepos)
if (file >= 0)
{
/*
We do not use my_pread() here because we want to have the file
pointer set to the end of the header after this function.
my_pread() may leave the file pointer untouched.
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
if (my_read(file,(char*) header,sizeof(info->header),MYF(0)) !=
sizeof(info->header))
......
@@ -183,3 +183,47 @@ a
32
42
drop table t1;
create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
insert into t1 values(103);
set auto_increment_increment=11;
set auto_increment_offset=4;
insert into t1 values(null);
insert into t1 values(null);
insert into t1 values(null);
ERROR 23000: Duplicate entry '125' for key 1
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
a mod(a-@@auto_increment_offset,@@auto_increment_increment)
103 0
114 0
125 0
create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
set auto_increment_increment=10;
set auto_increment_offset=1;
set insert_id=1000;
insert into t2 values(null);
Warnings:
Warning 1264 Out of range value adjusted for column 'a' at row 1
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
a mod(a-@@auto_increment_offset,@@auto_increment_increment)
251 0
create table t3 like t1;
set auto_increment_increment=1000;
set auto_increment_offset=700;
insert into t3 values(null);
Warnings:
Warning 1264 Out of range value adjusted for column 'a' at row 1
select * from t3 order by a;
a
127
select * from t1 order by a;
a
103
114
125
select * from t2 order by a;
a
251
select * from t3 order by a;
a
127
drop table t1,t2,t3;
@@ -132,3 +132,68 @@ id last_id
drop function bug15728;
drop function bug15728_insert;
drop table t1, t2;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
n b
2 50
3 100
4 150
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
select * from t1 order by n;
n b
2 100
4 400
1000 350
1001 600
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
insert into t1 values(null,100);
select * from t1 order by n;
n b
1 100
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
n b
1 100
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
n b
2 100
3 350
select * from t1 order by n;
n b
2 100
3 350
drop table t1;
@@ -96,9 +96,47 @@ select * from t1;
sync_slave_with_master;
select * from t1;
connection master;
# Test for BUG#20524 "auto_increment_* not observed when inserting
# a too large value". When an autogenerated value was bigger than the
# maximum possible value of the field, it was truncated to that max
# possible value, without being "rounded down" to still honour
# auto_increment_* variables.
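# Editor's note, a worked instance (assuming the tinyint maximum of 127):
# after 103, 114 and 125 the next generated value would be 136; it is
# truncated to 127 and then rounded down to the largest value of the form
# offset + N*increment, i.e. 4 + 11*floor((127-4)/11) = 125, which
# collides with the existing row and raises the duplicate-key error.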
connection master;
drop table t1;
create table t1 (a tinyint not null auto_increment primary key) engine=myisam;
insert into t1 values(103);
set auto_increment_increment=11;
set auto_increment_offset=4;
insert into t1 values(null);
insert into t1 values(null);
--error 1062
insert into t1 values(null);
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t1 order by a;
# same but with a larger value
create table t2 (a tinyint unsigned not null auto_increment primary key) engine=myisam;
set auto_increment_increment=10;
set auto_increment_offset=1;
set insert_id=1000;
insert into t2 values(null);
select a, mod(a-@@auto_increment_offset,@@auto_increment_increment) from t2 order by a;
# An offset so big that even first value does not fit
create table t3 like t1;
set auto_increment_increment=1000;
set auto_increment_offset=700;
insert into t3 values(null);
select * from t3 order by a;
sync_slave_with_master;
select * from t1 order by a;
select * from t2 order by a;
select * from t3 order by a;
connection master;
drop table t1,t2,t3;
# End cleanup
sync_slave_with_master;
@@ -147,6 +147,69 @@ drop function bug15728;
drop function bug15728_insert;
drop table t1, t2;
# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in
# auto_increment breaks binlog
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
# First, test that we do not call restore_auto_increment() too early
# in write_record():
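# (Editor's note: presumably, if the reserved auto-increment value were
# released on every duplicate-key error, the colliding row of the
# multi-row REPLACE below could reuse an already-taken value; the
# expected result gives the three rows the distinct values 2, 3 and 4.)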
set sql_log_bin=0;
insert into t1 values(null,100);
replace into t1 values(null,50),(null,100),(null,150);
select * from t1 order by n;
truncate table t1;
set sql_log_bin=1;
insert into t1 values(null,100);
select * from t1 order by n;
sync_slave_with_master;
# make slave's table autoinc counter bigger
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
# check that slave's table content is identical to master
select * from t1 order by n;
# only the auto_inc counter differs.
connection master;
replace into t1 values(null,100),(null,350);
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE.
# We first check that if we update a row using a value larger than the
# table's counter, the counter for the next row is bigger than the
# after-value of the updated row.
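# (With auto_increment_increment=1 this means: after the UPDATE sets
# n=1000, the next autogenerated value is 1001, as the expected result
# file shows.)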
connection master;
insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
# and now test for the bug:
connection master;
drop table t1;
create table t1 (n int primary key auto_increment not null,
b int, unique(b));
insert into t1 values(null,100);
select * from t1 order by n;
sync_slave_with_master;
insert into t1 values(null,200),(null,300);
delete from t1 where b <> 100;
select * from t1 order by n;
connection master;
insert into t1 values(null,100),(null,350) on duplicate key update n=2;
select * from t1 order by n;
sync_slave_with_master;
select * from t1 order by n;
connection master;
drop table t1;
# End of 5.0 tests
sync_slave_with_master;
@@ -1471,6 +1471,66 @@ next_insert_id(ulonglong nr,struct system_variables *variables)
}
void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
{
/*
  If we have set THD::next_insert_id previously and plan to insert an
  explicitly specified value larger than this, we need to increase
  THD::next_insert_id to be greater than the explicit value.
*/
THD *thd= table->in_use;
if (thd->clear_next_insert_id && (nr >= thd->next_insert_id))
{
if (thd->variables.auto_increment_increment != 1)
nr= next_insert_id(nr, &thd->variables);
else
nr++;
thd->next_insert_id= nr;
DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
}
}
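/*
  Editor's aside: a standalone sketch of the bump above, under the
  assumption that next_insert_id() returns the smallest value of the
  form auto_increment_offset + N*auto_increment_increment that is
  strictly greater than nr. next_step_value() and its plain C types
  are hypothetical stand-ins, not the server's code.
*/
static unsigned long long
next_step_value(unsigned long long nr,
                unsigned long long increment,
                unsigned long long offset)
{
  if (nr < offset)
    return offset;                /* first value of the sequence */
  return offset + ((nr - offset) / increment + 1) * increment;
}

/*
  Example: after an explicit insert of 103 with increment=11 and
  offset=4, next_step_value(103, 11, 4) == 114, matching the test
  results above.
*/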
/*
  Computes the largest number X:
  - smaller than or equal to "nr"
  - of the form: auto_increment_offset + N * auto_increment_increment
    where N >= 0.

  SYNOPSIS
    prev_insert_id
      nr         Number to "round down"
      variables  variables struct containing auto_increment_increment
                 and auto_increment_offset

  RETURN
    The number X if it exists, "nr" otherwise.
*/
inline ulonglong
prev_insert_id(ulonglong nr, struct system_variables *variables)
{
if (unlikely(nr < variables->auto_increment_offset))
{
/*
  There's nothing good we can do here. This is a pathological case, where
  the offset is larger than the column's max possible value, i.e. not
  even the first sequence value may be inserted. The user will receive a
  warning.
*/
DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour "
"auto_increment_offset: %lu",
nr, variables->auto_increment_offset));
return nr;
}
if (variables->auto_increment_increment == 1)
return nr; // optimization of the formula below
nr= (((nr - variables->auto_increment_offset)) /
(ulonglong) variables->auto_increment_increment);
return (nr * (ulonglong) variables->auto_increment_increment +
variables->auto_increment_offset);
}
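/*
  Editor's aside: the formula above copied into a minimal runnable
  program with plain C types (a sketch, not the handler code).
*/
#include <stdio.h>

static unsigned long long
prev_step_value(unsigned long long nr,
                unsigned long long increment,
                unsigned long long offset)
{
  if (nr < offset)              /* pathological: offset above column max */
    return nr;
  if (increment == 1)           /* same optimization as above */
    return nr;
  return ((nr - offset) / increment) * increment + offset;
}

int main(void)
{
  /* tinyint max 127 with increment=11, offset=4 rounds down to 125,
     exactly the duplicate value seen in the test results above. */
  printf("%llu\n", prev_step_value(127, 11, 4)); /* prints 125 */
  return 0;
}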
/*
Update the auto_increment field if necessary
@@ -1547,17 +1607,7 @@ bool handler::update_auto_increment()
/* Clear flag for next row */
/* Mark that we didn't generate a new value */
auto_increment_column_changed=0;
/* Update next_insert_id if we have already generated a value */
if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
{
if (variables->auto_increment_increment != 1)
nr= next_insert_id(nr, variables);
else
nr++;
thd->next_insert_id= nr;
DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
}
adjust_next_insert_id_after_explicit_value(nr);
DBUG_RETURN(0);
}
if (!(nr= thd->next_insert_id))
@@ -1580,10 +1630,19 @@ bool handler::update_auto_increment()
/* Mark that we should clear next_insert_id before next stmt */
thd->clear_next_insert_id= 1;
if (!table->next_number_field->store((longlong) nr, TRUE))
if (likely(!table->next_number_field->store((longlong) nr, TRUE)))
thd->insert_id((ulonglong) nr);
else
thd->insert_id(table->next_number_field->val_int());
{
/*
  Overflow of the field. We'll use the max value, but we try to
  decrease it to honour the auto_increment_* variables:
*/
nr= prev_insert_id(table->next_number_field->val_int(), variables);
thd->insert_id(nr);
if (unlikely(table->next_number_field->store((longlong) nr, TRUE)))
thd->insert_id(nr= table->next_number_field->val_int());
}
/*
We can't set next_insert_id if the auto-increment key is not the
......
@@ -564,6 +564,7 @@ public:
{}
virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
int ha_open(const char *name, int mode, int test_if_locked);
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
bool update_auto_increment();
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
......
@@ -624,8 +624,10 @@ void close_temporary_tables(THD *thd)
if (!mysql_bin_log.is_open())
{
for (table= thd->temporary_tables; table; table= table->next)
TABLE *next;
for (table= thd->temporary_tables; table; table= next)
{
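/* Fetch the next pointer first: close_temporary() frees 'table'. */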
next= table->next;
close_temporary(table, 1);
}
thd->temporary_tables= 0;
@@ -648,7 +650,6 @@ void close_temporary_tables(THD *thd)
insertion sort of temp tables by pseudo_thread_id to build ordered list
of sublists of equal pseudo_thread_id
*/
for (prev_table= thd->temporary_tables, table= prev_table->next;
table;
prev_table= table, table= table->next)
......
@@ -992,7 +992,6 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
uint key_nr;
if (error != HA_WRITE_SKIP)
goto err;
table->file->restore_auto_increment();
if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
{
error=HA_WRITE_SKIP; /* Database can't find key */
@@ -1065,20 +1064,20 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
if (res == VIEW_CHECK_ERROR)
goto before_trg_err;
if (thd->clear_next_insert_id)
{
/* Reset auto-increment cacheing if we do an update */
thd->clear_next_insert_id= 0;
thd->next_insert_id= 0;
}
if ((error=table->file->update_row(table->record[1],table->record[0])))
{
if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
{
table->file->restore_auto_increment();
goto ok_or_after_trg_err;
}
goto err;
}
info->updated++;
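/*
  If the duplicate-key UPDATE stored an explicit value into the
  auto-increment column, move next_insert_id above it (see
  handler::adjust_next_insert_id_after_explicit_value() above).
*/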
if (table->next_number_field)
table->file->adjust_next_insert_id_after_explicit_value(
  table->next_number_field->val_int());
trg_error= (table->triggers &&
table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE));
@@ -1107,12 +1106,6 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) &&
(!table->triggers || !table->triggers->has_delete_triggers()))
{
if (thd->clear_next_insert_id)
{
/* Reset auto-increment cacheing if we do an update */
thd->clear_next_insert_id= 0;
thd->next_insert_id= 0;
}
if ((error=table->file->update_row(table->record[1],
table->record[0])))
goto err;
@@ -1176,6 +1169,7 @@ err:
table->file->print_error(error,MYF(0));
before_trg_err:
table->file->restore_auto_increment();
if (key)
my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
DBUG_RETURN(1);
......