Commit b38ff28a authored by Sergei Golubchik

bugfix: mark_columns_needed_for_update

cannot use TABLE::merge_keys for that, because Field::part_of_key
was designed to mark fields for KEY_READ, so a field is not
"part of a key" if only a prefix of the field is.
parent d137b4db
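
A minimal SQL sketch (added for illustration only, not part of the commit; the table and
column names are hypothetical) of the situation the message above describes: with a prefix
index, the indexed column is not reported by Field::part_of_key, so a check based on
TABLE::merge_keys misses it, even though its value must still be read (and, for a virtual
column, re-evaluated) to maintain the index during an UPDATE.

-- hypothetical illustration, mirroring the test cases added below
create table t (
  a varchar(200),
  c varchar(400) generated always as (concat(a,a)) virtual,
  index (c(10))    -- prefix keypart: only the first 10 chars of c are in the key
);
insert into t (a) values ('hello');
update t set a = 'world';  -- c must be marked for read so its index entry can be updated
drop table t;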
@@ -430,9 +430,9 @@ call p_verify_status_increment(2, 2, 2, 2);
 --echo # 4. Read-write statement: UPDATE, update 0 rows, 1 row matches WHERE
 --echo #
 update t1 set a=2;
-call p_verify_status_increment(2, 2, 1, 0);
+call p_verify_status_increment(2, 0, 1, 0);
 commit;
-call p_verify_status_increment(2, 2, 1, 0);
+call p_verify_status_increment(2, 0, 1, 0);
 --echo # 5. Read-write statement: UPDATE, update 0 rows, 0 rows match WHERE
 --echo #
...
@@ -419,11 +419,11 @@ SUCCESS
 # 4. Read-write statement: UPDATE, update 0 rows, 1 row matches WHERE
 #
 update t1 set a=2;
-call p_verify_status_increment(2, 2, 1, 0);
+call p_verify_status_increment(2, 0, 1, 0);
 SUCCESS
 commit;
-call p_verify_status_increment(2, 2, 1, 0);
+call p_verify_status_increment(2, 0, 1, 0);
 SUCCESS
 # 5. Read-write statement: UPDATE, update 0 rows, 0 rows match WHERE
...
@@ -8,3 +8,30 @@ select * from t1;
 a b c
 2 3 4
 drop table t1;
+create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c));
+insert into t1 (a,y) values(1, "yyy");
+update t1 set a = 100 where a = 1;
+drop table t1;
+create table t1 (
+a varchar(10000),
+b varchar(3000),
+c varchar(14000) generated always as (concat(a,b)) virtual,
+d varchar(5000) generated always as (b) virtual,
+e int(11) generated always as (10) virtual,
+h int(11) not null primary key,
+index(c(100), d(20)));
+insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1);
+update t1 set a = repeat(cast(1 as char), 2000);
+drop table t1;
+create table t1 (
+a varchar(10000),
+b varchar(3000),
+c varchar(14000) generated always as (concat(a,b)) virtual,
+i varchar(5000) generated always as (b) virtual,
+d varchar(5000) generated always as (i) virtual,
+e int(11) generated always as (10) virtual,
+h int(11) not null primary key,
+index(c(100), d(20)));
+insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1);
+update t1 set a = repeat(cast(1 as char), 2000);
+drop table t1;
...
@@ -10,3 +10,39 @@ select * from t1;
 update t1 set a=2;
 select * from t1;
 drop table t1;
+#
+# one keypart is virtual, the other keypart is updated
+# this tests TABLE::mark_columns_needed_for_update()
+#
+create table t1 (a int, c int as(a), p varchar(20) as(y), y char(20), index (p,c));
+insert into t1 (a,y) values(1, "yyy");
+update t1 set a = 100 where a = 1;
+drop table t1;
+#
+# note: prefix keys below
+#
+create table t1 (
+a varchar(10000),
+b varchar(3000),
+c varchar(14000) generated always as (concat(a,b)) virtual,
+d varchar(5000) generated always as (b) virtual,
+e int(11) generated always as (10) virtual,
+h int(11) not null primary key,
+index(c(100), d(20)));
+insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1);
+update t1 set a = repeat(cast(1 as char), 2000);
+drop table t1;
+create table t1 (
+a varchar(10000),
+b varchar(3000),
+c varchar(14000) generated always as (concat(a,b)) virtual,
+i varchar(5000) generated always as (b) virtual,
+d varchar(5000) generated always as (i) virtual,
+e int(11) generated always as (10) virtual,
+h int(11) not null primary key,
+index(c(100), d(20)));
+insert t1 (a,b,h) values (repeat('g', 10000), repeat('x', 2800), 1);
+update t1 set a = repeat(cast(1 as char), 2000);
+drop table t1;
...
@@ -6272,15 +6272,34 @@ void TABLE::mark_columns_needed_for_update()
   if (triggers)
     triggers->mark_fields_used(TRG_EVENT_UPDATE);
+  if (default_field)
+    mark_default_fields_for_write(FALSE);
+  if (vfield)
+    need_signal|= mark_virtual_columns_for_write(FALSE);
   if (file->ha_table_flags() & HA_REQUIRES_KEY_COLUMNS_FOR_DELETE)
   {
-    /* Mark all used key columns for read */
-    Field **reg_field;
-    for (reg_field= field ; *reg_field ; reg_field++)
-    {
-      /* Merge keys is all keys that had a column refered to in the query */
-      if (merge_keys.is_overlapping((*reg_field)->part_of_key))
-        bitmap_set_bit(read_set, (*reg_field)->field_index);
-    }
+    KEY *end= key_info + s->keys;
+    for (KEY *k= key_info; k < end; k++)
+    {
+      KEY_PART_INFO *kpend= k->key_part + k->ext_key_parts;
+      bool any_written= false, all_read= true;
+      for (KEY_PART_INFO *kp= k->key_part; kp < kpend; kp++)
+      {
+        int idx= kp->fieldnr - 1;
+        any_written|= bitmap_is_set(write_set, idx);
+        all_read&= bitmap_is_set(read_set, idx);
+      }
+      if (any_written && !all_read)
+      {
+        for (KEY_PART_INFO *kp= k->key_part; kp < kpend; kp++)
+        {
+          int idx= kp->fieldnr - 1;
+          if (bitmap_fast_test_and_set(read_set, idx))
+            continue;
+          if (field[idx]->vcol_info)
+            mark_virtual_col(field[idx]);
+        }
+      }
+    }
     need_signal= true;
   }
@@ -6299,11 +6318,6 @@ void TABLE::mark_columns_needed_for_update()
       need_signal= true;
     }
   }
-  if (default_field)
-    mark_default_fields_for_write(FALSE);
-  /* Mark all virtual columns needed for update */
-  if (vfield)
-    need_signal|= mark_virtual_columns_for_write(FALSE);
   if (check_constraints)
   {
     mark_check_constraint_columns_for_read();
...