Commit 2955fb17 authored by monty@donna.mysql.fi

Merge work:/my/mysql into donna.mysql.fi:/home/my/bk/mysql

parents c28eda48 2aa2255c
......@@ -597,7 +597,7 @@ Replication in MySQL
* Replication Options:: Replication Options in my.cnf
* Replication SQL:: SQL Commands related to replication
* Replication FAQ:: Frequently Asked Questions about replication
* Troubleshooting Replication:: Troubleshooting Replication.
* Replication Problems:: Troubleshooting Replication.
Getting Maximum Performance from MySQL
......@@ -20816,6 +20816,9 @@ only partly indexed. @code{NULL} if the entire key is indexed.
For now, it tells whether index is FULLTEXT or not.
@end multitable
Note that as the @code{Cardinality} is counted based on statistics
stored as integers, it's not necessarily accurate for small tables.
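
For example (using a hypothetical two-row table; the names below are
illustrative only), the cardinality reported for such a small table may
be a rounded estimate rather than the exact number of distinct values:

@example
mysql> CREATE TABLE tiny (a INT NOT NULL, KEY (a));
mysql> INSERT INTO tiny VALUES (1),(2);
mysql> ANALYZE TABLE tiny;
mysql> SHOW INDEX FROM tiny;
@end example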
@cindex displaying, table status
@cindex tables, displaying status
@cindex status, tables
......@@ -23669,9 +23672,24 @@ Change to not use page locks at all when we are scanning tables.
@node BDB errors, , BDB TODO, BDB
@subsection Errors You May Get When Using BDB Tables
@itemize @bullet
@item
If you get the following error in the @code{hostname.err} log when
starting @code{mysqld}:
@example
bdb: Ignoring log file: .../log.XXXXXXXXXX: unsupported log version #
@end example
it means that the new @code{BDB} version doesn't support the old log
file format. In this case you have to delete all @code{BDB} log files
from your database directory (the files that have the format
@code{log.XXXXXXXXXX}) and restart @code{mysqld}. We also recommend
that you do a @code{mysqldump --opt} of your old @code{BDB} tables,
delete the old tables, and restore the dump.
@item
If you are running in not @code{auto_commit} mode and delete a table you
are using you may get the following error messages in the @strong{MySQL}
error file:
are using by another thread you may get the following error messages in
the @strong{MySQL} error file:
@example
001119 23:43:56 bdb: Missing log fileid entry
......@@ -23681,6 +23699,7 @@ error file:
This is not fatal but we don't recommend that you delete tables if you are
not in @code{auto_commit} mode, until this problem is fixed (the fix is
not trivial).
@end itemize
@cindex tables, @code{GEMINI}
@node GEMINI, INNOBASE, BDB, Table types
......@@ -26729,7 +26748,7 @@ tables}.
* Replication Options:: Replication Options in my.cnf
* Replication SQL:: SQL Commands related to replication
* Replication FAQ:: Frequently Asked Questions about replication
* Troubleshooting Replication:: Troubleshooting Replication.
* Replication Problems:: Troubleshooting Replication.
@end menu
@node Replication Intro, Replication Implementation, Replication, Replication
......@@ -26937,7 +26956,7 @@ Replication will be done correctly with @code{AUTO_INCREMENT},
@code{LAST_INSERT_ID}, and @code{TIMESTAMP} values.
@item
@code{RAND()} in updates does not replicate properly. Use
@code{RAND(some_non_rand_expr)} if you are replicating updates with
@code{RAND()}. You can, for example, use @code{UNIX_TIMESTAMP()} for the
argument to @code{RAND()}.
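
A minimal illustration of this (the table and column names are
hypothetical):

@example
UPDATE scores SET bonus = RAND(UNIX_TIMESTAMP()) WHERE bonus IS NULL;
@end example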
@item
......@@ -26947,6 +26966,14 @@ propagation. @code{LOAD LOCAL DATA INFILE} will be skipped.
@item
Update queries that use user variables are not replication-safe (yet).
@item
@code{FLUSH} commands are not stored in the binary log and are
therefore not replicated to the slaves. This is not normally a problem,
as @code{FLUSH} doesn't change any data. It does, however, mean that if
you update the @code{MySQL} privilege tables directly without using the
@code{GRANT} statement and you replicate the @code{MySQL} privilege
database, you must do a @code{FLUSH PRIVILEGES} on your slaves to put
the new privileges into effect.
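
For example (the user name and password below are hypothetical), after
changing a password directly in the grant tables on the master, each
slave that replicates the privilege database needs its own flush:

@example
mysql> UPDATE mysql.user SET Password=PASSWORD('new_pass')
    ->        WHERE User='some_user';
mysql> FLUSH PRIVILEGES;    # needed on the master as usual

# Then, on each slave that replicates the mysql database:
mysql> FLUSH PRIVILEGES;
@end example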
@item
Temporary tables, starting in 3.23.29, are replicated properly, with the
exception of the case when you shut down the slave server (not just the
slave thread) while you have some temporary tables open and they are
used in subsequent updates.
......@@ -27293,7 +27320,7 @@ last log on the list), backup all the logs you are about to delete
@end multitable
@node Replication FAQ, Troubleshooting Replication, Replication SQL, Replication
@node Replication FAQ, Replication Problems, Replication SQL, Replication
@section Replication FAQ
@cindex @code{Binlog_Dump}
......@@ -27546,7 +27573,7 @@ We are currently working on integrating an automatic master election
system into @strong{MySQL}, but until it is ready, you will have to
create your own monitoring tools.
@node Troubleshooting Replication, , Replication FAQ, Replication
@node Replication Problems, , Replication FAQ, Replication
@section Troubleshooting Replication
If you have followed the instructions, and your replication setup is not
......@@ -42210,6 +42237,10 @@ Fixed wrong define @code{CLIENT_TRANSACTIONS}.
Fixed bug in @code{SHOW VARIABLES} when using INNOBASE tables.
@item
Setting and using user variables in @code{SELECT DISTINCT} didn't work.
@item
Tuned @code{SHOW ANALYZE} for small tables.
@item
Fixed handling of arguments in the benchmark script @code{run-all-tests}.
@end itemize
@node News-3.23.34a, News-3.23.34, News-3.23.35, News-3.23.x
......@@ -580,9 +580,7 @@ static int chk_index(MI_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo,
}
if (param->testflag & T_STATISTICS)
{
if (*keys == 1L) /* first_key */
param->unique_count[keyinfo->keysegs]++;
else
if (*keys != 1L) /* not first_key */
{
uint diff;
_mi_key_cmp(keyinfo->seg,info->lastkey,key,USE_WHOLE_KEY,SEARCH_FIND,
......@@ -2520,10 +2518,11 @@ static int sort_key_write(SORT_INFO *sort_info, const void *a)
{
cmp=_mi_key_cmp(sort_info->keyseg,sort_info->key_block->lastkey,(uchar*) a,
USE_WHOLE_KEY,SEARCH_FIND | SEARCH_UPDATE ,&diff_pos);
sort_info->unique[diff_pos-1]++;
}
else
{
cmp= -1; diff_pos=sort_info->keyinfo->keysegs;
cmp= -1;
}
if ((sort_info->keyinfo->flag & HA_NOSAME) && cmp == 0)
{
......@@ -2544,7 +2543,6 @@ static int sort_key_write(SORT_INFO *sort_info, const void *a)
_mi_print_key(stdout,sort_info->keyseg,(uchar*) a, USE_WHOLE_KEY);
return (sort_delete_record(param));
}
sort_info->unique[diff_pos-1]++;
#ifndef DBUG_OFF
if (cmp > 0)
{
......@@ -3099,7 +3097,7 @@ static void update_key_parts(MI_KEYDEF *keyinfo,
if (count == 0)
tmp=records;
else
tmp= (records+count/2) / count;
tmp= (records + (count+1)/2) / (count+1);
if (tmp >= (ulonglong) ~(ulong) 0)
tmp=(ulonglong) ~(ulong) 0;
*rec_per_key_part=(ulong) tmp;
......
......@@ -23,7 +23,7 @@ c int(11) 0 select,insert,update,references
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 a A 4 NULL NULL
t1 1 b 1 b A 1 NULL NULL
t1 1 b 2 c A 2 NULL NULL
t1 1 b 2 c A 4 NULL NULL
Table Op Msg_type Msg_text
test.t1 check status Table is already up to date
Table Op Msg_type Msg_text
......@@ -39,7 +39,7 @@ test.t1 check status OK
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 a A 5 NULL NULL
t1 1 b 1 b A 1 NULL NULL
t1 1 b 2 c A 2 NULL NULL
t1 1 b 2 c A 5 NULL NULL
Table Op Msg_type Msg_text
test.t1 optimize status OK
Table Op Msg_type Msg_text
......@@ -53,3 +53,17 @@ mysql
test
Database (test%)
test
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 f1 A 1 NULL NULL
t1 0 PRIMARY 2 f2 A 3 NULL NULL
t1 0 PRIMARY 3 f3 A 9 NULL NULL
t1 0 PRIMARY 4 f4 A 18 NULL NULL
Table Op Msg_type Msg_text
test.t1 repair status OK
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Comment
t1 0 PRIMARY 1 f1 A 1 NULL NULL
t1 0 PRIMARY 2 f2 A 3 NULL NULL
t1 0 PRIMARY 3 f3 A 9 NULL NULL
t1 0 PRIMARY 4 f4 A 18 NULL NULL
......@@ -6,3 +6,11 @@ i
i @vv1:=if(sv1.i,1,0) @vv2:=if(sv2.i,1,0) @vv3:=if(sv3.i,1,0) @vv1+@vv2+@vv3
1 1 0 1 2
2 1 0 0 1
table type possible_keys key key_len ref rows Extra
t1 ref i i 4 const 1 where used
table type possible_keys key key_len ref rows Extra
t1 ALL NULL NULL NULL NULL 3 where used
table type possible_keys key key_len ref rows Extra
t1 index NULL i 4 NULL 3 where used; Using index
table type possible_keys key key_len ref rows Extra
t1 ref i i 4 const 1 where used
......@@ -13,7 +13,7 @@ lock tables t1 read;
check table t2,t1;
show columns from t1;
show full columns from t1;
show keys from t1;
show index from t1;
drop table t1,t2;
create table t1 (a int not null primary key, b int not null,c int not null, key(b,c));
......@@ -25,7 +25,7 @@ insert into t1 values (5,5,5);
check table t1 type=changed;
check table t1 type=medium;
check table t1 type=extended;
show keys from t1;
show index from t1;
!$1062 insert into t1 values (5,5,5);
optimize table t1;
optimize table t1;
......@@ -37,3 +37,14 @@ show variables like "this_doesn't_exists%";
show table status from test like "this_doesn't_exists%";
show databases;
show databases like "test%";
#
# Check of show index
#
create table t1 (f1 int not null, f2 int not null, f3 int not null, f4 int not null, primary key(f1,f2,f3,f4));
insert into t1 values (1,1,1,0),(1,1,2,0),(1,1,3,0),(1,2,1,0),(1,2,2,0),(1,2,3,0),(1,3,1,0),(1,3,2,0),(1,3,3,0),(1,1,1,1),(1,1,2,1),(1,1,3,1),(1,2,1,1),(1,2,2,1),(1,2,3,1),(1,3,1,1),(1,3,2,1),(1,3,3,1);
analyze table t1;
show index from t1;
repair table t1;
show index from t1;
drop table t1;
......@@ -12,4 +12,8 @@ create table t2 (i int not null, unique (i));
insert into t2 select distinct i from t1;
select * from t2;
select distinct t2.i,@vv1:=if(sv1.i,1,0),@vv2:=if(sv2.i,1,0),@vv3:=if(sv3.i,1,0), @vv1+@vv2+@vv3 from t2 left join t1 as sv1 on sv1.i=t2.i and sv1.v=1 left join t1 as sv2 on sv2.i=t2.i and sv2.v=2 left join t1 as sv3 on sv3.i=t2.i and sv3.v=3;
explain select * from t1 where i=@vv1;
explain select * from t1 where @vv1:=@vv1+1 and i=@vv1;
explain select @vv1:=i from t1 where i=@vv1;
explain select * from t1 where i=@vv1;
drop table t1,t2;
......@@ -1606,6 +1606,7 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name,
entry->name.length=name.length;
entry->value=0;
entry->length=0;
entry->update_query_id=0;
entry->type=STRING_RESULT;
memcpy(entry->name.str, name.str, name.length+1);
if (hash_insert(hash,(byte*) entry))
......@@ -1625,6 +1626,7 @@ bool Item_func_set_user_var::fix_fields(THD *thd,TABLE_LIST *tables)
if (Item_func::fix_fields(thd,tables) ||
!(entry= get_variable(&thd->user_vars, name, 1)))
return 1;
entry->update_query_id=thd->query_id;
return 0;
}
......@@ -1809,10 +1811,12 @@ longlong Item_func_get_user_var::val_int()
void Item_func_get_user_var::fix_length_and_dec()
{
THD *thd=current_thd;
maybe_null=1;
decimals=NOT_FIXED_DEC;
max_length=MAX_BLOB_WIDTH;
entry= get_variable(&current_thd->user_vars, name, 0);
if ((entry= get_variable(&thd->user_vars, name, 0)))
const_var_flag= thd->query_id != entry->update_query_id;
}
......
......@@ -825,9 +825,11 @@ class Item_func_get_user_var :public Item_func
{
LEX_STRING name;
user_var_entry *entry;
bool const_var_flag;
public:
Item_func_get_user_var(LEX_STRING a): Item_func(), name(a) {}
Item_func_get_user_var(LEX_STRING a):
Item_func(), name(a), const_var_flag(1) {}
user_var_entry *get_entry();
double val();
longlong val_int();
......@@ -835,8 +837,9 @@ public:
void fix_length_and_dec();
enum Item_result result_type() const;
const char *func_name() const { return "get_user_var"; }
bool const_item() const { return 0; }
table_map used_tables() const { return RAND_TABLE_BIT; }
bool const_item() const { return const_var_flag; }
table_map used_tables() const
{ return const_var_flag ? 0 : RAND_TABLE_BIT; }
};
class Item_func_inet_aton : public Item_int_func
......
......@@ -550,7 +550,7 @@ class user_var_entry
public:
LEX_STRING name;
char *value;
ulong length;
ulong length, update_query_id;
Item_result type;
};