Commit 88b59efe authored by Igor Babaev

Merge mdev539->10.0-base.

parents f6636852 ca2cdaad
......@@ -383,7 +383,8 @@ int maria_recreate_table(HA_CHECK *param, MARIA_HA **org_info, char *filename);
int maria_disable_indexes(MARIA_HA *info);
int maria_enable_indexes(MARIA_HA *info);
int maria_indexes_are_disabled(MARIA_HA *info);
void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows);
void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows,
my_bool all_keys);
my_bool maria_test_if_sort_rep(MARIA_HA *info, ha_rows rows, ulonglong key_map,
my_bool force);
......
......@@ -335,6 +335,12 @@ enum ha_base_keytype {
#define HA_CREATE_DELAY_KEY_WRITE 64
#define HA_CREATE_RELIES_ON_SQL_LAYER 128
/* Flags used by start_bulk_insert */
#define HA_CREATE_UNIQUE_INDEX_BY_SORT 1
/*
The following flags (OR-ed) are passed to handler::info() method.
The method copies misc handler information out of the storage engine
......
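
HA_CREATE_UNIQUE_INDEX_BY_SORT added above is so far the only flag in the start_bulk_insert flags word: a caller ORs it in and the engine tests it with a bitwise AND. A minimal sketch of that test, assuming a simplified stand-in for the handler interface (only the flag value itself comes from this patch):

#include <cstdio>

static const unsigned HA_CREATE_UNIQUE_INDEX_BY_SORT= 1;  /* value from the patch */

/* Models the engine-side check done in start_bulk_insert(). */
static bool build_unique_keys_by_sort(unsigned flags)
{
  return (flags & HA_CREATE_UNIQUE_INDEX_BY_SORT) != 0;
}

int main()
{
  printf("%d\n", build_unique_keys_by_sort(HA_CREATE_UNIQUE_INDEX_BY_SORT)); /* 1 */
  printf("%d\n", build_unique_keys_by_sort(0));                              /* 0 */
  return 0;
}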
......@@ -63,6 +63,8 @@
#define T_ZEROFILL_KEEP_LSN ((ulonglong) 1L << 33)
/** If repair should not bump create_rename_lsn */
#define T_NO_CREATE_RENAME_LSN ((ulonglong) 1L << 33)
#define T_CREATE_UNIQUE_BY_SORT ((ulonglong) 1L << 34)
#define T_SUPPRESS_ERR_HANDLING ((ulonglong) 1L << 35)
#define T_REP_ANY (T_REP | T_REP_BY_SORT | T_REP_PARALLEL)
......
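
Like the neighbouring T_ZEROFILL_KEEP_LSN and T_NO_CREATE_RENAME_LSN, the new T_CREATE_UNIQUE_BY_SORT and T_SUPPRESS_ERR_HANDLING bits sit above bit 31, hence the ulonglong cast; without it a 32-bit shift would overflow. A small sketch of setting and testing these bits, with a plain variable standing in for HA_CHECK::testflag:

#include <cstdio>

typedef unsigned long long ulonglong;

/* Values copied from the patch. */
static const ulonglong T_CREATE_UNIQUE_BY_SORT= ((ulonglong) 1L << 34);
static const ulonglong T_SUPPRESS_ERR_HANDLING= ((ulonglong) 1L << 35);

int main()
{
  ulonglong testflag= 0;                 /* stand-in for HA_CHECK::testflag */
  testflag|= T_CREATE_UNIQUE_BY_SORT;    /* as done in enable_indexes() */

  printf("%d\n", (testflag & T_CREATE_UNIQUE_BY_SORT) != 0);   /* 1 */
  printf("%d\n", (testflag & T_SUPPRESS_ERR_HANDLING) != 0);   /* 0 */
  return 0;
}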
--echo #
--echo # mdev-539: fast build of unique/primary indexes for MyISAM/Aria
--echo #
--disable_warnings
DROP DATABASE IF EXISTS dbt3_s001;
--enable_warnings
CREATE DATABASE dbt3_s001;
use dbt3_s001;
--disable_query_log
--disable_result_log
--disable_warnings
--source include/dbt3_s001.inc
--enable_warnings
--enable_result_log
--enable_query_log
drop index `primary` on lineitem;
show create table lineitem;
alter table lineitem add primary key (l_orderkey, l_linenumber);
show create table lineitem;
drop index `primary` on lineitem;
select * from lineitem where l_orderkey=1 and l_linenumber=2;
insert into lineitem values
(1,68,9,2,36,34850.16,0.07,0.06,'N','O','1996-04-12','1996-02-28','1996-04-20','TAKE BACK RETURN','MAIL','slyly bold pinto beans detect s');
select * from lineitem where l_orderkey=1 and l_linenumber=2;
--error ER_DUP_ENTRY
alter table lineitem add primary key (l_orderkey, l_linenumber);
show create table lineitem;
select * from lineitem where l_orderkey=1 and l_linenumber=2;
delete from lineitem where l_orderkey=1 and l_linenumber=2 and l_discount=0.07;
alter table lineitem add primary key (l_orderkey, l_linenumber);
show create table lineitem;
select * from lineitem where l_orderkey=1 and l_linenumber=2;
create unique index i_c_name on customer(c_name);
show create table customer;
select * from customer where c_name='Customer#000000003';
drop index i_c_name on customer;
insert into customer values
(303,'Customer#000000003','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
select * from customer where c_name='Customer#000000003';
--error ER_DUP_ENTRY
alter table customer add unique index i_c_name(c_name);
show create table customer;
select * from customer where c_name='Customer#000000003';
delete from customer where c_custkey=303;
select * from customer where c_name='Customer#000000003';
alter table customer add unique index i_c_name(c_name);
show create table customer;
select * from customer where c_name='Customer#000000003';
drop index `primary` on customer;
show create table customer;
insert into customer values
(3,'Customer#000000303','MG9kdTD2WBHm',1,'11-719-748-3364',7498.12,'AUTOMOBILE','special packages wake. slyly reg');
alter ignore table customer add primary key (c_custkey);
show create table customer;
select * from customer where c_custkey=3;
DROP DATABASE dbt3_s001;
......@@ -176,12 +176,12 @@ create table t1 (a int, b int);
alter table t1 add unique (a,b), add key (b);
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a A NULL NULL NULL YES BTREE
t1 0 a 2 b A NULL NULL NULL YES BTREE
t1 0 a 1 a A 3 NULL NULL YES BTREE
t1 0 a 2 b A 300 NULL NULL YES BTREE
t1 1 b 1 b A 100 NULL NULL YES BTREE
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t1 analyze status Table is already up to date
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 a 1 a A 3 NULL NULL YES BTREE
......
This diff is collapsed.
This diff is collapsed.
......@@ -1576,7 +1576,7 @@ update t1 set c1=lpad(id+1000, 12, ' '), c2=lpad(id+10000, 15, ' ');
alter table t1 add unique index (c1), add unique index (c2), add index (c3);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
test.t1 analyze status Table is already up to date
explain
select * from t1 where (c1=' 100000' or c2=' 2000000');
id select_type table type possible_keys key key_len ref rows Extra
......
......@@ -399,10 +399,10 @@ WHERE Code = Country GROUP BY Code)
order by Country;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY CountryLanguage index NULL PRIMARY 33 NULL 984 Using where; Using index
3 MATERIALIZED CountryLanguage index PRIMARY PRIMARY 33 NULL 984 Using index; Using temporary
3 MATERIALIZED Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using index
2 MATERIALIZED CountryLanguage index PRIMARY PRIMARY 33 NULL 984 Using index; Using temporary
2 MATERIALIZED Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using index
3 MATERIALIZED Country index PRIMARY PRIMARY 3 NULL 239 Using index
3 MATERIALIZED CountryLanguage ref PRIMARY PRIMARY 3 world.Country.Code 4 Using index
2 MATERIALIZED Country index PRIMARY PRIMARY 3 NULL 239 Using index
2 MATERIALIZED CountryLanguage ref PRIMARY PRIMARY 3 world.Country.Code 4 Using index
select count(*)
from CountryLanguage
where
......
--echo #
set @@storage_engine= Aria;
--source include/alter_table_mdev539.inc
set @@storage_engine= default;
--echo #
set @@storage_engine= MyISAM;
--source include/alter_table_mdev539.inc
set @@storage_engine= default;
......@@ -3809,6 +3809,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
SYNOPSIS
start_bulk_insert()
rows Number of rows to insert
flags Flags to control index creation
RETURN VALUE
NONE
......@@ -3816,7 +3817,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
DESCRIPTION
rows == 0 means we will probably insert many rows
*/
void ha_partition::start_bulk_insert(ha_rows rows)
void ha_partition::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_partition::start_bulk_insert");
......
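
ha_partition only gains the extra parameter here: conceptually, a composite handler passes the same flags word on to its underlying handlers when it starts their bulk inserts. The sketch below is a simplified, hypothetical model of that forwarding; child_handler and composite_handler are stand-ins rather than the real classes, and the per-partition row estimate handling (see guess_bulk_insert_rows() above) is omitted.

#include <cstdio>
#include <vector>

typedef unsigned long long ha_rows;

/* Stand-in for the per-partition handler interface. */
struct child_handler
{
  virtual void start_bulk_insert(ha_rows rows, unsigned flags)= 0;
  virtual ~child_handler() {}
};

struct print_child : public child_handler
{
  virtual void start_bulk_insert(ha_rows rows, unsigned flags)
  {
    printf("child: rows=%llu flags=%u\n", rows, flags);
  }
};

/* Hypothetical composite handler: forwards the unchanged flags word to
   every child; rows == 0 keeps its "probably many rows" meaning. */
struct composite_handler
{
  std::vector<child_handler*> children;

  void start_bulk_insert(ha_rows rows, unsigned flags)
  {
    for (size_t i= 0; i < children.size(); i++)
      children[i]->start_bulk_insert(rows, flags);
  }
};

int main()
{
  print_child a, b;
  composite_handler p;
  p.children.push_back(&a);
  p.children.push_back(&b);
  p.start_bulk_insert(0, 1);   /* 1 == HA_CREATE_UNIQUE_INDEX_BY_SORT */
  return 0;
}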
......@@ -372,7 +372,7 @@ public:
virtual int delete_row(const uchar * buf);
virtual int delete_all_rows(void);
virtual int truncate();
virtual void start_bulk_insert(ha_rows rows);
virtual void start_bulk_insert(ha_rows rows, uint flags);
virtual int end_bulk_insert();
private:
ha_rows guess_bulk_insert_rows();
......
......@@ -1952,11 +1952,11 @@ public:
/** to be actually called to get 'check()' functionality*/
int ha_check(THD *thd, HA_CHECK_OPT *check_opt);
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt);
void ha_start_bulk_insert(ha_rows rows)
void ha_start_bulk_insert(ha_rows rows, uint flags= 0)
{
DBUG_ENTER("handler::ha_start_bulk_insert");
estimation_rows_to_insert= rows;
start_bulk_insert(rows);
start_bulk_insert(rows, flags);
DBUG_VOID_RETURN;
}
int ha_end_bulk_insert()
......@@ -2861,7 +2861,7 @@ private:
DBUG_ASSERT(!(ha_table_flags() & HA_CAN_REPAIR));
return HA_ADMIN_NOT_IMPLEMENTED;
}
virtual void start_bulk_insert(ha_rows rows) {}
virtual void start_bulk_insert(ha_rows rows, uint flags) {}
virtual int end_bulk_insert() { return 0; }
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
......
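
The wrapper gives flags a default of 0, so every existing ha_start_bulk_insert(rows) call keeps its old behaviour, while the two-argument virtual lets an engine opt in. A minimal sketch of that wrapper-plus-override pattern; handler_sketch and engine_sketch are simplified stand-ins for the real classes.

#include <cstdio>

typedef unsigned long long ha_rows;
static const unsigned HA_CREATE_UNIQUE_INDEX_BY_SORT= 1;

/* Stand-in for class handler: public wrapper with a defaulted flags
   argument, private virtual hook for engines. */
class handler_sketch
{
public:
  void ha_start_bulk_insert(ha_rows rows, unsigned flags= 0)
  {
    estimation_rows_to_insert= rows;
    start_bulk_insert(rows, flags);
  }
private:
  ha_rows estimation_rows_to_insert;
  /* Default implementation ignores the hint, as in handler.h. */
  virtual void start_bulk_insert(ha_rows rows, unsigned flags) {}
};

/* Hypothetical engine that acts on the new flag. */
class engine_sketch : public handler_sketch
{
private:
  virtual void start_bulk_insert(ha_rows rows, unsigned flags)
  {
    bool all_keys= (flags & HA_CREATE_UNIQUE_INDEX_BY_SORT) != 0;
    printf("rows=%llu rebuild unique keys by sort: %d\n", rows, all_keys);
  }
};

int main()
{
  engine_sketch h;
  h.ha_start_bulk_insert(100);                                  /* old-style call */
  h.ha_start_bulk_insert(100, HA_CREATE_UNIQUE_INDEX_BY_SORT);  /* new-style call */
  return 0;
}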
......@@ -7469,7 +7469,8 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
MODE_STRICT_ALL_TABLES));
from->file->info(HA_STATUS_VARIABLE);
to->file->ha_start_bulk_insert(from->file->stats.records);
to->file->ha_start_bulk_insert(from->file->stats.records,
ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
errpos= 3;
copy_end=copy;
......
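
copy_data_between_tables() is the caller that actually requests the new behaviour: unless ALTER ... IGNORE was used (presumably because duplicates must then be handled row by row rather than turned into a hard error by the sort-based rebuild), it passes HA_CREATE_UNIQUE_INDEX_BY_SORT along with the row estimate it just refreshed via info(HA_STATUS_VARIABLE). A condensed sketch of that calling pattern, with file_sketch standing in for the two handlers and the per-row copy loop elided:

#include <cstdio>

typedef unsigned long long ha_rows;
static const unsigned HA_CREATE_UNIQUE_INDEX_BY_SORT= 1;

/* Stand-ins for the source/destination handlers; only the calling
   pattern is the point, the bodies are placeholders. */
struct file_sketch
{
  ha_rows records;                        /* stats.records */
  void info(unsigned /*flags*/) {}        /* HA_STATUS_VARIABLE */
  void ha_start_bulk_insert(ha_rows rows, unsigned flags)
  { printf("bulk insert: rows=%llu flags=%u\n", rows, flags); }
  int ha_end_bulk_insert() { printf("bulk insert done\n"); return 0; }
};

/* Condensed shape of the copy path: estimate rows, start the bulk
   insert (asking for unique-by-sort unless IGNORE), copy, finish. */
static int copy_rows(file_sketch *from, file_sketch *to, bool ignore)
{
  from->info(0);                          /* refresh stats.records */
  to->ha_start_bulk_insert(from->records,
                           ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
  /* ... per-row copy loop elided ... */
  return to->ha_end_bulk_insert();
}

int main()
{
  file_sketch from= { 1000 }, to= { 0 };
  copy_rows(&from, &to, /*ignore=*/ false);
  return 0;
}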
......@@ -398,6 +398,9 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
char msgbuf[MYSQL_ERRMSG_SIZE];
char name[NAME_LEN * 2 + 2];
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
return;
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1]= 0; // healthy paranoia
......@@ -851,6 +854,8 @@ void _ma_check_print_error(HA_CHECK *param, const char *fmt, ...)
DBUG_ENTER("_ma_check_print_error");
param->error_printed |= 1;
param->out_flag |= O_DATA_LOST;
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
DBUG_VOID_RETURN;
va_start(args, fmt);
_ma_check_print_msg(param, "error", fmt, args);
va_end(args);
......@@ -1580,6 +1585,7 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
param->thd= thd;
param->tmpdir= &mysql_tmpdir_list;
param->out_flag= 0;
share->state.dupp_key= MI_MAX_KEY;
strmov(fixed_name, share->open_file_name.str);
// Don't lock tables if we have used LOCK TABLE
......@@ -1630,6 +1636,10 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
error= maria_repair_by_sort(param, file, fixed_name,
test(param->testflag & T_QUICK));
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
print_keydup_error(share->state.dupp_key,
ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
}
else
{
......@@ -1938,6 +1948,8 @@ int ha_maria::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS | T_SAFE_REPAIR);
if (file->create_unique_index_by_sort)
param.testflag|= T_CREATE_UNIQUE_BY_SORT;
if (bulk_insert_single_undo == BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_REPAIR;
......@@ -2018,15 +2030,16 @@ int ha_maria::indexes_are_disabled(void)
activate special bulk-insert optimizations
SYNOPSIS
start_bulk_insert(rows)
rows Rows to be inserted
start_bulk_insert(rows, flags)
rows Rows to be inserted
0 if we don't know
flags Flags to control index creation
NOTICE
Do not forget to call end_bulk_insert() later!
*/
void ha_maria::start_bulk_insert(ha_rows rows)
void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_maria::start_bulk_insert");
THD *thd= table->in_use;
......@@ -2092,7 +2105,10 @@ void ha_maria::start_bulk_insert(ha_rows rows)
maria_clear_all_keys_active(file->s->state.key_map);
}
else
maria_disable_non_unique_index(file, rows);
{
my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
maria_disable_indexes_for_rebuild(file, rows, all_keys);
}
if (share->now_transactional)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
......
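
Putting the ha_maria pieces above together: when a large insert into an empty table starts, start_bulk_insert() now disables unique keys as well (when the caller passed HA_CREATE_UNIQUE_INDEX_BY_SORT) and records that in create_unique_index_by_sort; enable_indexes() then adds T_CREATE_UNIQUE_BY_SORT to the repair flags, and if repair-by-sort hits a duplicate it stores the key number in share->state.dupp_key so repair() can report ER_DUP_ENTRY for that key instead of retrying the repair. The sketch below models just that dupp_key hand-off with simplified stand-in types (MAX_KEY_SENTINEL plays the role of MAX_KEY/MI_MAX_KEY):

#include <cstdio>

static const unsigned MAX_KEY_SENTINEL= 64;   /* stands in for MAX_KEY */

struct share_state_sketch { unsigned dupp_key; };

/* Model of the repair-by-sort change: a duplicate in a unique key is
   recorded instead of triggering a repair retry. */
static int repair_by_sort_sketch(share_state_sketch *state,
                                 bool duplicate_found, unsigned dup_key_nr)
{
  if (duplicate_found)
  {
    state->dupp_key= dup_key_nr;     /* remembered for the caller */
    return 1;                        /* repair still reports failure */
  }
  return 0;
}

/* Model of ha_maria::repair(): turn the remembered key into a
   "Duplicate entry" style error rather than retrying the repair. */
static void repair_caller_sketch(share_state_sketch *state)
{
  state->dupp_key= MAX_KEY_SENTINEL;               /* as set before repair */
  if (repair_by_sort_sketch(state, /*duplicate_found=*/ true, /*key=*/ 0) &&
      state->dupp_key != MAX_KEY_SENTINEL)
    printf("ER_DUP_ENTRY for key %u\n", state->dupp_key);  /* print_keydup_error() */
}

int main()
{
  share_state_sketch s;
  repair_caller_sketch(&s);
  return 0;
}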
......@@ -125,7 +125,7 @@ public:
int disable_indexes(uint mode);
int enable_indexes(uint mode);
int indexes_are_disabled(void);
void start_bulk_insert(ha_rows rows);
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key);
void update_create_info(HA_CREATE_INFO * create_info);
......
......@@ -3879,7 +3879,10 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info,
(my_bool) (!(param->testflag & T_VERBOSE)),
(size_t) param->sort_buffer_length))
{
param->retry_repair=1;
if ((param->testflag & T_CREATE_UNIQUE_BY_SORT) && sort_param.sort_info->dupp)
share->state.dupp_key= sort_param.key;
else
param->retry_repair= 1;
_ma_check_print_error(param, "Create index by sort failed");
goto err;
}
......@@ -5498,6 +5501,9 @@ static int sort_key_write(MARIA_SORT_PARAM *sort_param, const uchar *a)
sort_info->dupp++;
sort_info->info->cur_row.lastpos= get_record_for_key(sort_param->keyinfo,
a);
if ((param->testflag & (T_CREATE_UNIQUE_BY_SORT | T_SUPPRESS_ERR_HANDLING))
== T_CREATE_UNIQUE_BY_SORT)
param->testflag|= T_SUPPRESS_ERR_HANDLING;
_ma_check_print_warning(param,
"Duplicate key %2u for record at %10s against "
"record at %10s",
......@@ -6410,7 +6416,7 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
}
/*
Deactivate all not unique index that can be recreated fast
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
......@@ -6418,7 +6424,8 @@ static my_bool maria_too_big_key_for_sort(MARIA_KEYDEF *key, ha_rows rows)
rows we will put into the file.
*/
void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows)
void maria_disable_indexes_for_rebuild(MARIA_HA *info, ha_rows rows,
my_bool all_keys)
{
MARIA_SHARE *share= info->s;
MARIA_KEYDEF *key=share->keyinfo;
......@@ -6428,12 +6435,13 @@ void maria_disable_non_unique_index(MARIA_HA *info, ha_rows rows)
(!rows || rows >= MARIA_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag &
(HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1)
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX)) &&
! maria_too_big_key_for_sort(key,rows) && share->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)))
{
maria_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
info->create_unique_index_by_sort= all_keys;
}
}
}
......
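
The comment above still describes which keys maria_disable_indexes_for_rebuild() may switch off; the new all_keys argument simply widens that set to unique keys when the bulk insert was started with HA_CREATE_UNIQUE_INDEX_BY_SORT. A stand-alone sketch of the per-key decision follows; the flag constants are simplified stand-ins, and the size check done by maria_too_big_key_for_sort() and the auto-increment key test are reduced to booleans.

#include <cstdio>

/* Simplified stand-ins for the key flag bits tested in the real loop. */
static const unsigned HA_NOSAME=      1;   /* unique key */
static const unsigned HA_SPATIAL=     2;
static const unsigned HA_AUTO_KEY=    4;
static const unsigned HA_RTREE_INDEX= 8;

/* Sketch of the per-key decision: spatial, r-tree and auto-increment
   keys are never disabled; unique keys are disabled only when all_keys
   is set, i.e. when HA_CREATE_UNIQUE_INDEX_BY_SORT was passed. */
static bool disable_key_for_rebuild(unsigned key_flags, bool too_big_for_sort,
                                    bool is_auto_key, bool all_keys)
{
  if (key_flags & (HA_SPATIAL | HA_AUTO_KEY | HA_RTREE_INDEX))
    return false;
  if (too_big_for_sort || is_auto_key)
    return false;
  return all_keys || !(key_flags & HA_NOSAME);
}

int main()
{
  printf("%d\n", disable_key_for_rebuild(HA_NOSAME, false, false, true));  /* 1: unique key, all_keys */
  printf("%d\n", disable_key_for_rebuild(HA_NOSAME, false, false, false)); /* 0: unique key, old behaviour */
  printf("%d\n", disable_key_for_rebuild(0, false, false, false));         /* 1: non-unique key */
  return 0;
}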
......@@ -196,6 +196,9 @@ typedef struct st_maria_state_info
/* LSN when we wrote file id to the log */
LSN logrec_file_id;
uint8 dupp_key; /* Lastly processed index with */
/* violated uniqueness constraint */
/* the following isn't saved on disk */
uint state_diff_length; /* Should be 0 */
uint state_length; /* Length of state header in file */
......@@ -667,6 +670,7 @@ struct st_maria_handler
uchar length_buff[5]; /* temp buff to store blob lengths */
int maria_rtree_recursion_depth;
my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
};
......
......@@ -139,6 +139,9 @@ static void mi_check_print_msg(HA_CHECK *param, const char* msg_type,
char msgbuf[MYSQL_ERRMSG_SIZE];
char name[NAME_LEN*2+2];
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
return;
msg_length= my_vsnprintf(msgbuf, sizeof(msgbuf), fmt, args);
msgbuf[sizeof(msgbuf) - 1] = 0; // healthy paranoia
......@@ -571,6 +574,8 @@ void mi_check_print_error(HA_CHECK *param, const char *fmt,...)
{
param->error_printed|=1;
param->out_flag|= O_DATA_LOST;
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
return;
va_list args;
va_start(args, fmt);
mi_check_print_msg(param, "error", fmt, args);
......@@ -1071,6 +1076,7 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
param.thd= thd;
param.tmpdir= &mysql_tmpdir_list;
param.out_flag= 0;
share->state.dupp_key= MI_MAX_KEY;
strmov(fixed_name,file->filename);
// Release latches since this can take a long time
......@@ -1128,6 +1134,10 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
error = mi_repair_by_sort(&param, file, fixed_name,
test(param.testflag & T_QUICK));
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
print_keydup_error(share->state.dupp_key,
ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
}
else
{
......@@ -1435,6 +1445,8 @@ int ha_myisam::enable_indexes(uint mode)
param.op_name= "recreating_index";
param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK |
T_CREATE_MISSING_KEYS);
if (file->create_unique_index_by_sort)
param.testflag|= T_CREATE_UNIQUE_BY_SORT;
param.myf_rw&= ~MY_WAIT_IF_FULL;
param.sort_buffer_length= THDVAR(thd, sort_buffer_size);
param.stats_method= (enum_handler_stats_method)THDVAR(thd, stats_method);
......@@ -1502,15 +1514,16 @@ int ha_myisam::indexes_are_disabled(void)
activate special bulk-insert optimizations
SYNOPSIS
start_bulk_insert(rows)
start_bulk_insert(rows, flags)
rows Rows to be inserted
0 if we don't know
flags Flags to control index creation
NOTICE
Do not forget to call end_bulk_insert() later!
*/
void ha_myisam::start_bulk_insert(ha_rows rows)
void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd= current_thd;
......@@ -1544,7 +1557,10 @@ void ha_myisam::start_bulk_insert(ha_rows rows)
mi_clear_all_keys_active(file->s->state.key_map);
}
else
mi_disable_non_unique_index(file,rows);
{
my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
mi_disable_indexes_for_rebuild(file, rows, all_keys);
}
}
else
if (!file->bulk_insert &&
......
......@@ -115,7 +115,7 @@ class ha_myisam: public handler
int disable_indexes(uint mode);
int enable_indexes(uint mode);
int indexes_are_disabled(void);
void start_bulk_insert(ha_rows rows);
void start_bulk_insert(ha_rows rows, uint flags);
int end_bulk_insert();
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
void update_create_info(HA_CREATE_INFO *create_info);
......
......@@ -2408,7 +2408,10 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
(my_bool) (!(param->testflag & T_VERBOSE)),
param->sort_buffer_length))
{
param->retry_repair= 1;
if ((param->testflag & T_CREATE_UNIQUE_BY_SORT) && sort_param.sort_info->dupp)
share->state.dupp_key= sort_param.key;
else
param->retry_repair= 1;
if (! param->error_printed)
mi_check_print_error(param, "Couldn't fix table with create_index_by_sort(). Error: %d",
my_errno);
......@@ -3829,6 +3832,9 @@ static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a)
sort_info->info->lastpos=get_record_for_key(sort_info->info,
sort_param->keyinfo,
(uchar*) a);
if ((param->testflag & (T_CREATE_UNIQUE_BY_SORT | T_SUPPRESS_ERR_HANDLING))
== T_CREATE_UNIQUE_BY_SORT)
param->testflag|= T_SUPPRESS_ERR_HANDLING;
mi_check_print_warning(param,
"Duplicate key for record at %10s against record at %10s",
llstr(sort_info->info->lastpos,llbuff),
......@@ -4664,7 +4670,7 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
}
/*
Deactivate all not unique index that can be recreated fast
Deactivate all indexes that can be recreated fast.
These include packed keys on which sorting will use more temporary
space than the max allowed file length or for which the unpacked keys
will take much more space than packed keys.
......@@ -4672,7 +4678,8 @@ static my_bool mi_too_big_key_for_sort(MI_KEYDEF *key, ha_rows rows)
rows we will put into the file.
*/
void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys)
{
MYISAM_SHARE *share=info->s;
MI_KEYDEF *key=share->keyinfo;
......@@ -4682,11 +4689,13 @@ void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows)
(!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES));
for (i=0 ; i < share->base.keys ; i++,key++)
{
if (!(key->flag & (HA_NOSAME | HA_SPATIAL | HA_AUTO_KEY)) &&
! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1)
if (!(key->flag & (HA_SPATIAL | HA_AUTO_KEY)) &&
! mi_too_big_key_for_sort(key,rows) && info->s->base.auto_key != i+1 &&
(all_keys || !(key->flag & HA_NOSAME)))
{
mi_clear_key_active(share->state.key_map, i);
info->update|= HA_STATE_CHANGED;
info->create_unique_index_by_sort= all_keys;
}
}
}
......
......@@ -82,6 +82,9 @@ typedef struct st_mi_state_info
uint open_count;
uint8 changed; /* Changed since myisamchk */
uint8 dupp_key; /* Lastly processed index with */
/* violated uniqueness constraint */
/* the following isn't saved on disk */
uint state_diff_length; /* Should be 0 */
uint state_length; /* Length of state header in file */
......@@ -297,6 +300,7 @@ struct st_myisam_info
my_bool page_changed;
/* If info->buff has to be reread for rnext */
my_bool buff_used;
my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
THR_LOCK_DATA lock;
......@@ -705,8 +709,8 @@ void mi_restore_status(void *param);
void mi_copy_status(void *to, void *from);
my_bool mi_check_status(void *param);
void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table);
void mi_disable_non_unique_index(MI_INFO *info, ha_rows rows);
void mi_disable_indexes_for_rebuild(MI_INFO *info, ha_rows rows,
my_bool all_keys);
extern MI_INFO *test_if_reopen(char *filename);
my_bool check_table_is_closed(const char *name, const char *where);
int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share, const char *orn_name,
......