Commit 58ccbb7c authored by Satya B

merge to mysql-5.1-bugteam

parents 83d5ca95 a49b18a7
......@@ -20,7 +20,6 @@ ndb.* # joro : NDB tests marked as experiment
rpl.rpl_get_master_version_and_clock* # Bug #49191 2009-12-01 Daogang rpl_get_master_version_and_clock failed on PB2: COM_REGISTER_SLAVE failed
rpl.rpl_innodb_bug28430* @solaris # Bug#46029
rpl.rpl_trigger* # Bug#47810 2009-10-04 joro rpl.rpl_trigger.test fails with valgrind errors with the innodb plugin
rpl_ndb.* # joro : NDB tests marked as experimental as agreed with bochklin
rpl_ndb.rpl_ndb_log # Bug#38998
......
......@@ -40,6 +40,26 @@ select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join
isbn city libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
select t2.isbn,city,@bar:=t1.libname,count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
isbn city @bar:=t1.libname a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
SELECT @bar;
@bar
Berkeley Public2
select t2.isbn,city,concat(@bar:=t1.libname),count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
isbn city concat(@bar:=t1.libname) a
007 Berkeley Berkeley Public1 2
000 New York New York Public Libra 2
SELECT @bar;
@bar
Berkeley Public2
drop table t1, t2, t3;
create table t1 (f1 int);
insert into t1 values (1);
......
......@@ -409,6 +409,21 @@ SELECT a, b FROM t1 WHERE a=2 AND b=3 GROUP BY a, b;
a b
2 3
DROP TABLE t1;
CREATE TABLE t1 (f1 int(11) default NULL, f2 int(11) default NULL);
CREATE TABLE t2 (f1 int(11) default NULL, f2 int(11) default NULL, foo int(11));
CREATE TABLE t3 (f1 int(11) default NULL, f2 int(11) default NULL);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t3 VALUES(10, 10);
INSERT INTO t3 VALUES(10, 10);
SELECT MIN(t2.f1),
@bar:= (SELECT MIN(t3.f2) FROM t3 WHERE t3.f2 > foo)
FROM t1,t2 WHERE t1.f1 = t2.f1 ORDER BY t2.f1;
MIN(t2.f1) @bar:= (SELECT MIN(t3.f2) FROM t3 WHERE t3.f2 > foo)
10 NULL
DROP TABLE t1, t2, t3;
End of 5.0 tests
CREATE TABLE t1 (i INT);
CREATE TRIGGER t_after_insert AFTER INSERT ON t1 FOR EACH ROW SET @bug42188 = 10;
......
......@@ -35,6 +35,25 @@ insert into t1 values ('NYC Lib','New York');
select t2.isbn,city,t1.libname,count(t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city,t1.libname;
select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1;
select t2.isbn,city,t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct concat(t1.libname,'a')) > 1;
select t2.isbn,city,@bar:=t1.libname,count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
#
# Wrong result, see bug#49872
#
SELECT @bar;
select t2.isbn,city,concat(@bar:=t1.libname),count(distinct t1.libname) as a
from t3 left join t1 on t3.libname=t1.libname left join t2
on t3.isbn=t2.isbn group by city having count(distinct
t1.libname) > 1;
#
# Wrong result, see bug#49872
#
SELECT @bar;
drop table t1, t2, t3;
#
......
......@@ -295,6 +295,26 @@ SELECT @a, @b;
SELECT a, b FROM t1 WHERE a=2 AND b=3 GROUP BY a, b;
DROP TABLE t1;
#
# Bug#47371: reference by same column name
#
CREATE TABLE t1 (f1 int(11) default NULL, f2 int(11) default NULL);
CREATE TABLE t2 (f1 int(11) default NULL, f2 int(11) default NULL, foo int(11));
CREATE TABLE t3 (f1 int(11) default NULL, f2 int(11) default NULL);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t1 VALUES(10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t2 VALUES(10, 10, 10);
INSERT INTO t3 VALUES(10, 10);
INSERT INTO t3 VALUES(10, 10);
SELECT MIN(t2.f1),
@bar:= (SELECT MIN(t3.f2) FROM t3 WHERE t3.f2 > foo)
FROM t1,t2 WHERE t1.f1 = t2.f1 ORDER BY t2.f1;
DROP TABLE t1, t2, t3;
--echo End of 5.0 tests
#
......
......@@ -5747,6 +5747,23 @@ const key_map *ha_partition::keys_to_use_for_scanning()
   DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
 }
+#define MAX_PARTS_FOR_OPTIMIZER_CALLS 10
+/*
+  Prepare start variables for estimating optimizer costs.
+  @param[out] first          First used partition after pruning.
+  @param[out] num_used_parts Number of partitions left after pruning.
+  @param[out] check_min_num  Minimum number of partitions to call before
+                             the estimate may be extrapolated.
+*/
+void ha_partition::partitions_optimizer_call_preparations(uint *first,
+                                                           uint *num_used_parts,
+                                                           uint *check_min_num)
+{
+  *first= bitmap_get_first_set(&(m_part_info->used_partitions));
+  *num_used_parts= bitmap_bits_set(&(m_part_info->used_partitions));
+  *check_min_num= min(MAX_PARTS_FOR_OPTIMIZER_CALLS, *num_used_parts);
+}
 /*
   Return time for a scan of the table
......@@ -5760,43 +5777,67 @@ const key_map *ha_partition::keys_to_use_for_scanning()
 double ha_partition::scan_time()
 {
-  double scan_time= 0;
-  handler **file;
+  double scan_time= 0.0;
+  uint first, part_id, num_used_parts, check_min_num, partitions_called= 0;
   DBUG_ENTER("ha_partition::scan_time");
-  for (file= m_file; *file; file++)
-    if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
-      scan_time+= (*file)->scan_time();
+  partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num);
+  for (part_id= first; partitions_called < num_used_parts ; part_id++)
+  {
+    if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
+      continue;
+    scan_time+= m_file[part_id]->scan_time();
+    partitions_called++;
+    if (partitions_called >= check_min_num && scan_time != 0.0)
+    {
+      DBUG_RETURN(scan_time *
+                  (double) num_used_parts / (double) partitions_called);
+    }
+  }
   DBUG_RETURN(scan_time);
 }
 /*
-  Get time to read
+  Estimate rows for records_in_range or estimate_rows_upper_bound.
-  SYNOPSIS
-    read_time()
-    index                Index number used
-    ranges               Number of ranges
-    rows                 Number of rows
-  RETURN VALUE
-    time for read
+  @param is_records_in_range  call records_in_range instead of
+                              estimate_rows_upper_bound.
+  @param inx                  (only for records_in_range) index to use.
+  @param min_key              (only for records_in_range) start of range.
+  @param max_key              (only for records_in_range) end of range.
-  DESCRIPTION
-    This will be optimised later to include whether or not the index can
-    be used with partitioning. To achieve we need to add another parameter
-    that specifies how many of the index fields that are bound in the ranges.
-    Possibly added as a new call to handlers.
+  @return Number of rows or HA_POS_ERROR.
 */
-double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+ha_rows ha_partition::estimate_rows(bool is_records_in_range, uint inx,
+                                    key_range *min_key, key_range *max_key)
 {
-  DBUG_ENTER("ha_partition::read_time");
+  ha_rows rows, estimated_rows= 0;
+  uint first, part_id, num_used_parts, check_min_num, partitions_called= 0;
+  DBUG_ENTER("ha_partition::estimate_rows");
-  DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
+  partitions_optimizer_call_preparations(&first, &num_used_parts, &check_min_num);
+  for (part_id= first; partitions_called < num_used_parts ; part_id++)
+  {
+    if (!bitmap_is_set(&(m_part_info->used_partitions), part_id))
+      continue;
+    if (is_records_in_range)
+      rows= m_file[part_id]->records_in_range(inx, min_key, max_key);
+    else
+      rows= m_file[part_id]->estimate_rows_upper_bound();
+    if (rows == HA_POS_ERROR)
+      DBUG_RETURN(HA_POS_ERROR);
+    estimated_rows+= rows;
+    partitions_called++;
+    if (partitions_called >= check_min_num && estimated_rows)
+    {
+      DBUG_RETURN(estimated_rows * num_used_parts / partitions_called);
+    }
+  }
+  DBUG_RETURN(estimated_rows);
 }
 /*
   Find number of records in a range
......@@ -5824,22 +5865,9 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
 ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
                                        key_range *max_key)
 {
-  handler **file;
-  ha_rows in_range= 0;
   DBUG_ENTER("ha_partition::records_in_range");
-  file= m_file;
-  do
-  {
-    if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
-    {
-      ha_rows tmp_in_range= (*file)->records_in_range(inx, min_key, max_key);
-      if (tmp_in_range == HA_POS_ERROR)
-        DBUG_RETURN(tmp_in_range);
-      in_range+= tmp_in_range;
-    }
-  } while (*(++file));
-  DBUG_RETURN(in_range);
+  DBUG_RETURN(estimate_rows(TRUE, inx, min_key, max_key));
 }
......@@ -5855,22 +5883,36 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
 ha_rows ha_partition::estimate_rows_upper_bound()
 {
-  ha_rows rows, tot_rows= 0;
-  handler **file;
   DBUG_ENTER("ha_partition::estimate_rows_upper_bound");
-  file= m_file;
-  do
-  {
-    if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
-    {
-      rows= (*file)->estimate_rows_upper_bound();
-      if (rows == HA_POS_ERROR)
-        DBUG_RETURN(HA_POS_ERROR);
-      tot_rows+= rows;
-    }
-  } while (*(++file));
-  DBUG_RETURN(tot_rows);
+  DBUG_RETURN(estimate_rows(FALSE, 0, NULL, NULL));
 }
+/*
+  Get time to read
+  SYNOPSIS
+    read_time()
+    index                Index number used
+    ranges               Number of ranges
+    rows                 Number of rows
+  RETURN VALUE
+    time for read
+  DESCRIPTION
+    This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve we need to add another parameter
+    that specifies how many of the index fields that are bound in the ranges.
+    Possibly added as a new call to handlers.
+*/
+double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
+{
+  DBUG_ENTER("ha_partition::read_time");
+  DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
+}
......
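The new scan_time() and estimate_rows() above share one idea: call at most MAX_PARTS_FOR_OPTIMIZER_CALLS partitions and scale the partial total up to all used partitions. A minimal, self-contained sketch of that arithmetic, using hypothetical names and a plain vector instead of the real ha_partition/bitmap APIs:

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for the cap used above.
static const unsigned MAX_PARTS_FOR_OPTIMIZER_CALLS= 10;

// Sample at most min(cap, used) partitions, then scale the partial total
// linearly to cover all used partitions (the same rule scan_time() and
// estimate_rows() apply).
static double sampled_estimate(const std::vector<double> &per_part_cost)
{
  unsigned num_used_parts= (unsigned) per_part_cost.size();
  unsigned check_min_num= std::min(MAX_PARTS_FOR_OPTIMIZER_CALLS, num_used_parts);
  double total= 0.0;
  unsigned partitions_called= 0;

  for (unsigned p= 0; p < num_used_parts; p++)
  {
    total+= per_part_cost[p];          // stand-in for m_file[p]->scan_time()
    partitions_called++;
    if (partitions_called >= check_min_num && total != 0.0)
      return total * (double) num_used_parts / (double) partitions_called;
  }
  return total;
}

int main()
{
  // 40 used partitions, each costing roughly 2.5 "time units":
  // 10 are sampled (25.0), scaled by 40/10 to an estimate of 100.0.
  std::vector<double> costs(40, 2.5);
  std::printf("%.1f\n", sampled_estimate(costs));
  return 0;
}

The linear scale-up implicitly assumes the used partitions are of roughly similar size; in return, no single estimate triggers more than MAX_PARTS_FOR_OPTIMIZER_CALLS per-partition handler calls.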
......@@ -547,6 +547,18 @@ public:
-------------------------------------------------------------------------
*/
private:
  /*
    Prepare the values (first used partition, number of used partitions,
    minimum number of partitions to call) that limit how many partitions
    the optimizer hints/cost calls touch.
  */
  void partitions_optimizer_call_preparations(uint *first,
                                              uint *num_used_parts,
                                              uint *check_min_num);
  ha_rows estimate_rows(bool is_records_in_range, uint inx,
                        key_range *min_key, key_range *max_key);
public:
/*
keys_to_use_for_scanning can probably be implemented as the
intersection of all underlying handlers if mixed handlers are used.
......
......@@ -606,7 +606,7 @@ void Item_func::signal_divide_by_null()
 Item *Item_func::get_tmp_table_item(THD *thd)
 {
-  if (!with_sum_func && !const_item() && functype() != SUSERVAR_FUNC)
+  if (!with_sum_func && !const_item())
     return new Item_field(result_field);
   return copy_or_same(thd);
 }
......
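The one-line change in Item_func::get_tmp_table_item() above means that SET-user-variable functions are now treated like other non-constant, non-aggregate functions: once their value has been written into the group-by temporary table, later phases read it back through an Item_field instead of re-evaluating the assignment. That appears to be why the user_var tests added earlier in this merge record SELECT @bar values that do not match the visible result rows and flag them as a known wrong result, tracked separately as bug#49872. A toy model of the two evaluation phases, with hypothetical names and no MySQL internals:

#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for the user variable written by "@bar := t1.libname".
static std::string user_var_bar;

// Evaluating the select expression has a side effect: it assigns @bar.
static std::string eval_assign_bar(const std::string &libname)
{
  user_var_bar= libname;
  return libname;
}

int main()
{
  // Names taken from the recorded results above; the processing order is
  // illustrative only.
  std::vector<std::string> input_rows=
    {"Berkeley Public1", "Berkeley Public2", "New York Public Libra"};

  // Phase 1: fill the group-by temporary table, evaluating the expression
  // (and its assignment side effect) once per input row.
  std::vector<std::string> tmp_table;
  for (const std::string &libname : input_rows)
    tmp_table.push_back(eval_assign_bar(libname));

  // Phase 2: with the change above, the stored column is read back as a
  // plain field; the assignment is not evaluated again here.
  for (const std::string &stored : tmp_table)
    std::printf("row: %s\n", stored.c_str());

  // @bar ends up as whatever the fill phase assigned last, not necessarily
  // a value from the final result rows.
  std::printf("@bar = %s\n", user_var_bar.c_str());
  return 0;
}

In the toy, @bar keeps the last value the fill phase assigned, which need not correspond to a row that survives HAVING; that looks like the mismatch the "Wrong result, see bug#49872" comments in the new tests point at.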