Kirill Smelkov / mariadb / Commits / 878ba474

Commit 878ba474 authored Dec 11, 2013 by Rich Prohaska
port to mysql 5.6.15
parent 195e6219
Showing 5 changed files with 27 additions and 25 deletions (+27 -25)
storage/tokudb/ha_tokudb.cc           +13 -14
storage/tokudb/ha_tokudb_admin.cc      +2  -2
storage/tokudb/ha_tokudb_alter_56.cc   +6  -6
storage/tokudb/ha_tokudb_update.cc     +3  -3
storage/tokudb/hatoku_defines.h        +3  -0
storage/tokudb/ha_tokudb.cc
@@ -1507,8 +1507,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
             kc_info->num_offset_bytes = 2;
         }
-    for (uint i = 0; i < table_share->keys + test(hidden_primary_key); i++) {
+    for (uint i = 0; i < table_share->keys + tokudb_test(hidden_primary_key); i++) {
         //
         // do the cluster/primary key filtering calculations
         //
@@ -1551,7 +1550,7 @@ exit:
 }
 bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
-    uint curr_num_DBs = table_share->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
     bool ret_val;
     if (curr_num_DBs == 1) {
         ret_val = true;
@@ -1717,7 +1716,7 @@ int ha_tokudb::initialize_share(
         share->try_table_lock = false;
     }
-    share->num_DBs = table_share->keys + test(hidden_primary_key);
+    share->num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
     error = 0;
 exit:
@@ -2804,7 +2803,7 @@ DBT *ha_tokudb::pack_key(
 {
     TOKUDB_DBUG_ENTER("ha_tokudb::pack_key");
 #if TOKU_INCLUDE_EXTENDED_KEYS
-    if (keynr != primary_key && !test(hidden_primary_key)) {
+    if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
         DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
     }
 #endif
@@ -3229,7 +3228,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
     abort_loader = false;
     rw_rdlock(&share->num_DBs_lock);
-    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     num_DBs_locked_in_bulk = true;
     lock_count = 0;
@@ -3743,7 +3742,7 @@ void ha_tokudb::set_main_dict_put_flags(
     )
 {
     uint32_t old_prelock_flags = 0;
-    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     bool in_hot_index = share->num_DBs > curr_num_DBs;
     bool using_ignore_flag_opt = do_ignore_flag_optimization(
         thd, table, share->replace_into_fast);
@@ -3787,7 +3786,7 @@ int ha_tokudb::insert_row_to_main_dictionary(uchar* record, DBT* pk_key, DBT* pk
     int error = 0;
     uint32_t put_flags = mult_put_flags[primary_key];
     THD *thd = ha_thd();
-    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     assert(curr_num_DBs == 1);
@@ -3997,7 +3996,7 @@ int ha_tokudb::write_row(uchar * record) {
         // for #4633
         // if we have a duplicate key error, let's check the primary key to see
         // if there is a duplicate there. If so, set last_dup_key to the pk
-        if (error == DB_KEYEXIST && !test(hidden_primary_key) && last_dup_key != primary_key) {
+        if (error == DB_KEYEXIST && !tokudb_test(hidden_primary_key) && last_dup_key != primary_key) {
             int r = share->file->getf_set(
                 share->file,
                 txn,
@@ -5829,7 +5828,7 @@ int ha_tokudb::info(uint flag) {
     TOKUDB_DBUG_ENTER("ha_tokudb::info %p %d %lld", this, flag, (long long) share->rows);
     int error;
     DB_TXN* txn = NULL;
-    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     DB_BTREE_STAT64 dict_stats;
     if (flag & HA_STATUS_VARIABLE) {
         // Just to get optimizations right
@@ -6337,7 +6336,7 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l
         // if creating a hot index
         if (thd_sql_command(thd) == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
             rw_rdlock(&share->num_DBs_lock);
-            if (share->num_DBs == (table->s->keys + test(hidden_primary_key))) {
+            if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) {
                 lock_type = TL_WRITE_ALLOW_WRITE;
             }
             lock.type = lock_type;
@@ -7589,7 +7588,7 @@ int ha_tokudb::tokudb_add_index(
     //
     // number of DB files we have open currently, before add_index is executed
     //
-    uint curr_num_DBs = table_arg->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table_arg->s->keys + tokudb_test(hidden_primary_key);
     //
     // get the row type to use for the indexes we're adding
@@ -7929,7 +7928,7 @@ To add indexes, make sure no transactions touch the table.", share->table_name);
 // Closes added indexes in case of error in error path of add_index and alter_table_phase2
 //
 void ha_tokudb::restore_add_index(TABLE* table_arg, uint num_of_keys, bool incremented_numDBs, bool modified_DBs) {
-    uint curr_num_DBs = table_arg->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table_arg->s->keys + tokudb_test(hidden_primary_key);
     uint curr_index = 0;
     //
@@ -8150,7 +8149,7 @@ int ha_tokudb::delete_all_rows_internal() {
     error = txn_begin(db_env, 0, &txn, 0, ha_thd());
     if (error) { goto cleanup; }
-    curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     for (uint i = 0; i < curr_num_DBs; i++) {
         error = share->key_file[i]->pre_acquire_fileops_lock(
             share->key_file[i],
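Most hunks in this file recompute curr_num_DBs, the number of dictionaries the handler keeps open: one per declared key, plus one more when hidden_primary_key is nonzero (presumably because the table has no explicit primary key and TokuDB maintains a hidden one). set_main_dict_put_flags and the update/upsert paths then compare that count against share->num_DBs to detect a hot index build in progress. A hedged sketch of that comparison, with the handler state reduced to plain parameters (the helper name is illustrative, not from the commit):

    #define tokudb_test(e) ((e) ? 1 : 0)  // replacement macro from this commit

    // Illustrative: the handler expects keys + (1 if hidden PK) dictionaries open;
    // if share->num_DBs is larger, a hot CREATE INDEX is adding more behind it.
    inline bool hot_index_in_progress(unsigned int share_num_dbs,
                                      unsigned int declared_keys,
                                      unsigned int hidden_primary_key) {
        unsigned int curr_num_DBs = declared_keys + tokudb_test(hidden_primary_key);
        return share_num_dbs > curr_num_DBs;
    }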
storage/tokudb/ha_tokudb_admin.cc
@@ -200,7 +200,7 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
     while (ha_tokudb_optimize_wait) sleep(1); // debug
     int error;
-    uint curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
 #ifdef HA_TOKUDB_HAS_THD_PROGRESS
     // each DB is its own stage. as HOT goes through each db, we'll
@@ -290,7 +290,7 @@ int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) {
         if (r != 0)
             result = HA_ADMIN_INTERNAL_ERROR;
         if (result == HA_ADMIN_OK) {
-            uint32_t num_DBs = table_share->keys + test(hidden_primary_key);
+            uint32_t num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
             snprintf(write_status_msg, sizeof write_status_msg, "%s primary=%d num=%d", share->table_name, primary_key, num_DBs);
             if (tokudb_debug & TOKUDB_DEBUG_CHECK) {
                 ha_tokudb_check_info(thd, table, write_status_msg);
storage/tokudb/ha_tokudb_alter_56.cc
@@ -473,7 +473,7 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
         // Set the new compression
         enum toku_compression_method method = row_type_to_compression_method(create_info->row_type);
-        uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+        uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
         for (uint32_t i = 0; i < curr_num_DBs; i++) {
             db = share->key_file[i];
             error = db->change_compression_method(db, method);
@@ -597,7 +597,7 @@ int ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplac
     uint32_t max_column_extra_size;
     uint32_t num_column_extra;
     uint32_t num_columns = 0;
-    uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     uint32_t columns[table->s->fields + altered_table->s->fields]; // set size such that we know it is big enough for both cases
     memset(columns, 0, sizeof(columns));
@@ -748,7 +748,7 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
         restore_drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count);
     }
     if (ctx->compression_changed) {
-        uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+        uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
         for (uint32_t i = 0; i < curr_num_DBs; i++) {
             DB *db = share->key_file[i];
             int error = db->change_compression_method(db, ctx->orig_compression_method);
@@ -773,7 +773,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
     int error = 0;
     tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
-    uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     for (uint32_t i = 0; i < curr_num_DBs; i++) {
         // change to a new descriptor
         DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
@@ -955,7 +955,7 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
         assert(0);
     }
-    uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     for (uint32_t i = 0; i < curr_num_DBs; i++) {
         // change to a new descriptor
         DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
@@ -1034,7 +1034,7 @@ int ha_tokudb::alter_table_expand_blobs(TABLE *altered_table, Alter_inplace_info
     int error = 0;
     tokudb_alter_ctx *ctx = static_cast<tokudb_alter_ctx *>(ha_alter_info->handler_ctx);
-    uint32_t curr_num_DBs = table->s->keys + test(hidden_primary_key);
+    uint32_t curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
     for (uint32_t i = 0; i < curr_num_DBs; i++) {
         // change to a new descriptor
         DBT row_descriptor; memset(&row_descriptor, 0, sizeof row_descriptor);
storage/tokudb/ha_tokudb_update.cc
@@ -549,7 +549,7 @@ static bool is_strict_mode(THD *thd) {
 #if 50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699
     return thd->is_strict_mode();
 #else
-    return test(thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES));
+    return tokudb_test(thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES));
 #endif
 }
@@ -837,7 +837,7 @@ int ha_tokudb::send_update_message(List<Item> &update_fields, List<Item> &update
     rw_rdlock(&share->num_DBs_lock);
-    if (share->num_DBs > table->s->keys + test(hidden_primary_key)) { // hot index in progress
+    if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) { // hot index in progress
         error = ENOTSUP; // run on the slow path
     } else {
         // send the update message
@@ -990,7 +990,7 @@ int ha_tokudb::send_upsert_message(THD *thd, List<Item> &update_fields, List<Ite
     rw_rdlock(&share->num_DBs_lock);
-    if (share->num_DBs > table->s->keys + test(hidden_primary_key)) { // hot index in progress
+    if (share->num_DBs > table->s->keys + tokudb_test(hidden_primary_key)) { // hot index in progress
         error = ENOTSUP; // run on the slow path
     } else {
         // send the upsert message
storage/tokudb/hatoku_defines.h
@@ -484,4 +484,7 @@ static inline void tokudb_pthread_cond_broadcast(pthread_cond_t *cond) {
     assert(r == 0);
 }
+// mysql 5.6.15 removed the test macro, so we define our own
+#define tokudb_test(e) ((e) ? 1 : 0)
 #endif
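The new macro keeps the int 0/1 contract that the removed MySQL test() macro presumably had, which is why all three usage patterns in this commit keep their meaning: adding the result to a key count, negating it with !, and returning it from a bool function after masking sql_mode. A small self-contained check of those three uses, assuming only the macro added above (the mode-bit value below is illustrative, not a real MODE_STRICT_* constant):

    #include <cassert>

    #define tokudb_test(e) ((e) ? 1 : 0)  // as added in hatoku_defines.h

    int main() {
        unsigned int keys = 2, hidden_primary_key = 1;
        unsigned long long sql_mode = 0x400ULL;               // illustrative nonzero mode bits

        assert(keys + tokudb_test(hidden_primary_key) == 3);  // arithmetic use (curr_num_DBs)
        assert(!tokudb_test(0u));                              // boolean-negation use (pack_key)
        bool strict = tokudb_test(sql_mode & 0x400ULL);        // bitmask-to-bool use (is_strict_mode)
        assert(strict);
        return 0;
    }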