Commit b9a41928, authored Dec 19, 2007 by kostja@bodhi.(none)
Merge bk-internal.mysql.com:/home/bk/mysql-5.1-runtime
into bodhi.(none):/opt/local/work/mysql-5.1-runtime
Parents: 1a5b69ec 15c04b2d
Showing 6 changed files with 127 additions and 38 deletions
mysql-test/r/bdb_notembedded.result   +35 -0
mysql-test/t/bdb_notembedded.test     +38 -0
sql/ha_partition.cc                   +23 -6
sql/handler.h                         +15 -16
sql/sql_select.cc                     +15 -15
sql/sql_table.cc                      +1 -1
mysql-test/r/bdb_notembedded.result (new file, mode 100644)
set autocommit=1;
reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; insert into bug16206 values(2)
drop table bug16206;
reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
show binlog events;
Log_name Pos Event_type Server_id End_log_pos Info
f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4
f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb
f n Query 1 n use `test`; insert into bug16206 values(0)
f n Query 1 n use `test`; insert into bug16206 values(1)
f n Query 1 n use `test`; BEGIN
f n Query 1 n use `test`; insert into bug16206 values(2)
f n Query 1 n use `test`; COMMIT
f n Query 1 n use `test`; insert into bug16206 values(3)
drop table bug16206;
set autocommit=0;
End of 5.0 tests
mysql-test/t/bdb_notembedded.test (new file, mode 100644)
-- source include/not_embedded.inc
-- source include/have_bdb.inc

#
# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode
#

set autocommit=1;

let $VERSION=`select version()`;

reset master;
create table bug16206 (a int);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;

reset master;
create table bug16206 (a int) engine= bdb;
insert into bug16206 values(0);
insert into bug16206 values(1);
start transaction;
insert into bug16206 values(2);
commit;
insert into bug16206 values(3);
--replace_result $VERSION VERSION
--replace_column 1 f 2 n 5 n
show binlog events;
drop table bug16206;

set autocommit=0;

--echo End of 5.0 tests
sql/ha_partition.cc
@@ -1574,9 +1574,13 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
       }
       else
       {
+        THD *thd= ha_thd();
         /* Copy record to new handler */
         copied++;
-        if ((result= m_new_file[new_part]->write_row(m_rec0)))
+        tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+        result= m_new_file[new_part]->ha_write_row(m_rec0);
+        reenable_binlog(thd);
+        if (result)
           goto error;
       }
     }
@@ -2694,6 +2698,7 @@ int ha_partition::write_row(uchar * buf)
   longlong func_value;
   bool autoincrement_lock= FALSE;
   my_bitmap_map *old_map;
+  THD *thd= ha_thd();
 #ifdef NOT_NEEDED
   uchar *rec0= m_rec0;
 #endif
@@ -2765,7 +2770,9 @@ int ha_partition::write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
-  error= m_file[part_id]->write_row(buf);
+  tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+  error= m_file[part_id]->ha_write_row(buf);
+  reenable_binlog(thd);
 exit:
   if (autoincrement_lock)
     pthread_mutex_unlock(&table_share->mutex);
@@ -2806,6 +2813,7 @@ exit:
 int ha_partition::update_row(const uchar *old_data, uchar *new_data)
 {
+  THD *thd= ha_thd();
   uint32 new_part_id, old_part_id;
   int error= 0;
   longlong func_value;
@@ -2840,16 +2848,25 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
-    error= m_file[new_part_id]->update_row(old_data, new_data);
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_update_row(old_data, new_data);
+    reenable_binlog(thd);
     goto exit;
   }
   else
   {
     DBUG_PRINT("info", ("Update from partition %d to partition %d",
                         old_part_id, new_part_id));
-    if ((error= m_file[new_part_id]->write_row(new_data)))
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_write_row(new_data);
+    reenable_binlog(thd);
+    if (error)
       goto exit;
-    if ((error= m_file[old_part_id]->delete_row(old_data)))
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[old_part_id]->ha_delete_row(old_data);
+    reenable_binlog(thd);
+    if (error)
     {
 #ifdef IN_THE_FUTURE
       (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
@@ -3980,7 +3997,7 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
 int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
 {
-  handler *file= file= m_file[m_part_spec.start_part];
+  handler *file= m_file[m_part_spec.start_part];
   int error;
   DBUG_ENTER("ha_partition::handle_unordered_next");
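Every hunk above applies the same pattern: the per-partition row change goes through the engine's public ha_*_row wrapper, but with the session's binary logging switched off around the call, so the low-level change inside a partition is not replicated on top of the statement that caused it. Below is a minimal sketch of that shape, assuming the 5.1 server's internal headers (THD, handler, uchar, and the tmp_disable_binlog/reenable_binlog macros); the helper name write_into_partition is hypothetical, since the real code inlines the pattern directly in ha_partition::write_row()/update_row():

/* Sketch only -- compiles inside the MySQL 5.1 server tree, not standalone.
   write_into_partition() is a hypothetical helper illustrating the pattern
   used in the hunks above. */
static int write_into_partition(THD *thd, handler *part_file, uchar *rec)
{
  int error;                            /* declared outside the binlog macros' scope */
  tmp_disable_binlog(thd);              /* Do not replicate the low-level changes. */
  error= part_file->ha_write_row(rec);  /* write through the engine's public wrapper */
  reenable_binlog(thd);                 /* restore the caller's binlog setting */
  return error;                         /* caller maps non-zero to goto exit/error */
}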
sql/handler.h
@@ -1674,22 +1674,6 @@ public:
                                          uint table_changes)
   { return COMPATIBLE_DATA_NO; }
 
-  /** These are only called from sql_select for internal temporary tables */
-  virtual int write_row(uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-
-  virtual int update_row(const uchar *old_data __attribute__((unused)),
-                         uchar *new_data __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-
-  virtual int delete_row(const uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-
   /**
     use_hidden_primary_key() is called in case of an update/delete when
     (table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
@@ -1721,6 +1705,21 @@ private:
   */
   virtual int rnd_init(bool scan)= 0;
   virtual int rnd_end() { return 0; }
+  virtual int write_row(uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+
+  virtual int update_row(const uchar *old_data __attribute__((unused)),
+                         uchar *new_data __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+
+  virtual int delete_row(const uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
   /**
     Reset state of file to after 'open'.
     This function is called after every statement for all tables used
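The header change enforces the same discipline at compile time: the raw write_row()/update_row()/delete_row() virtuals move from the public section of the handler class into the private one, so the SQL layer (the sql_select.cc and sql_table.cc hunks below) can only reach the engine through the public ha_write_row()/ha_update_row()/ha_delete_row() entry points, which gives one place to hang cross-cutting work such as the binlog handling above. A self-contained toy sketch of that accessibility pattern, not MySQL code, with illustrative names only:

// Toy illustration of "public wrapper, private virtual".
#include <cstdio>

class toy_handler
{
public:
  int ha_write_row(const char *row)            // public entry point for callers
  {
    std::printf("wrapper: pre-write hook\n");  // stand-in for logging/instrumentation
    return write_row(row);                      // dispatch to the engine implementation
  }
private:
  virtual int write_row(const char *row)       // raw engine write, unreachable from outside
  {
    std::printf("engine: wrote %s\n", row);
    return 0;
  }
};

int main()
{
  toy_handler h;
  return h.ha_write_row("record[0]");           // h.write_row(...) would not compile here
}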
sql/sql_select.cc
@@ -10554,13 +10554,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
   */
   while (!table->file->rnd_next(new_table.record[1]))
   {
-    write_err= new_table.file->write_row(new_table.record[1]);
+    write_err= new_table.file->ha_write_row(new_table.record[1]);
     DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY;);
     if (write_err)
       goto err;
   }
   /* copy row that filled HEAP table */
-  if ((write_err= new_table.file->write_row(table->record[0])))
+  if ((write_err= new_table.file->ha_write_row(table->record[0])))
   {
     if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
         !ignore_last_dupp_key_error)
@@ -12023,7 +12023,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   {
     int error;
     join->found_records++;
-    if ((error=table->file->write_row(table->record[0])))
+    if ((error=table->file->ha_write_row(table->record[0])))
     {
       if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
         goto end;
@@ -12085,7 +12085,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   {						/* Update old record */
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
-                                       table->record[0])))
+    if ((error=table->file->ha_update_row(table->record[1],
+                                          table->record[0])))
     {
       table->file->print_error(error,MYF(0));	/* purecov: inspected */
@@ -12109,7 +12109,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   }
   init_tmptable_sum_functions(join->sum_funcs);
   copy_funcs(join->tmp_table_param.items_to_copy);
-  if ((error=table->file->write_row(table->record[0])))
+  if ((error=table->file->ha_write_row(table->record[0])))
   {
     if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
                                 error, 0))
@@ -12145,7 +12145,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   copy_fields(&join->tmp_table_param);		// Groups are copied twice.
   copy_funcs(join->tmp_table_param.items_to_copy);
 
-  if (!(error=table->file->write_row(table->record[0])))
+  if (!(error=table->file->ha_write_row(table->record[0])))
     join->send_records++;			// New group
   else
   {
@@ -12161,7 +12161,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     }
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
-                                       table->record[0])))
+    if ((error=table->file->ha_update_row(table->record[1],
+                                          table->record[0])))
     {
       table->file->print_error(error,MYF(0));	/* purecov: inspected */
@@ -12205,7 +12205,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
                         join->sum_funcs_end[send_group_parts]);
 	if (!join->having || join->having->val_int())
 	{
-          int error= table->file->write_row(table->record[0]);
+          int error= table->file->ha_write_row(table->record[0]);
           if (error && create_myisam_from_heap(join->thd, table,
                                                &join->tmp_table_param,
                                                error, 0))
@@ -13433,7 +13433,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
         goto err;
       error=file->rnd_next(record);
       continue;
@@ -13460,7 +13460,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
       }
       if (compare_record(table, first_field) == 0)
       {
-	if ((error=file->delete_row(record)))
+	if ((error=file->ha_delete_row(record)))
 	  goto err;
       }
       else if (!found)
@@ -13557,7 +13557,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
         goto err;
       continue;
     }
@@ -13574,7 +13574,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
     if (hash_search(&hash, org_key_pos, key_length))
     {
       /* Duplicated found ; Remove the row */
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
         goto err;
     }
     else
@@ -15582,7 +15582,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
 	  item->save_in_result_field(1);
       }
       copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
-      if ((write_error= table_arg->file->write_row(table_arg->record[0])))
+      if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
       {
 	if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
                                     write_error, 0))
sql/sql_table.cc
@@ -7059,7 +7059,7 @@ copy_data_between_tables(TABLE *from,TABLE *to,
       copy_ptr->do_copy(copy_ptr);
     }
     prev_insert_id= to->file->next_insert_id;
-    error=to->file->write_row(to->record[0]);
+    error=to->file->ha_write_row(to->record[0]);
     to->auto_increment_field_not_null= FALSE;
     if (error)
     {