Commit 86bc6b6a authored by Sergei Golubchik

tokudb: CLUSTERING=YES syntax for indexes

parent f2b5f1fd
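The change replaces TokuDB's old HA_CLUSTERING key flag with an engine-defined index option, so a secondary index can be declared clustering directly in SQL. A minimal usage sketch (table, column, and index names are illustrative, assuming the TokuDB engine is loaded):

    CREATE TABLE t1 (
      a INT PRIMARY KEY,
      b INT,
      c VARCHAR(100),
      INDEX b_clustering (b) CLUSTERING=YES
    ) ENGINE=TokuDB;

Because the option is registered below as HA_IOPTION_BOOL("clustering", clustering, 0), it defaults to NO and reaches the handler as key_info[i].option_struct->clustering, which is what the hunks that follow check instead of the flag bit.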
@@ -125,8 +125,8 @@ static inline uint get_key_parts(const KEY *key);
 #include "tokudb_buffer.h"
 #include "tokudb_status.h"
 #include "tokudb_card.h"
-#include "ha_tokudb.h"
 #include "hatoku_hton.h"
+#include "ha_tokudb.h"
 #include <mysql/plugin.h>

 static const char *ha_tokudb_exts[] = {
@@ -405,7 +405,8 @@ ulong ha_tokudb::index_flags(uint idx, uint part, bool all_parts) const {
 #if defined(MARIADB_BASE_VERSION) || (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699)
     flags |= HA_DO_INDEX_COND_PUSHDOWN;
 #endif
-    if (table_share->key_info[idx].flags & HA_CLUSTERING) {
+    if (table_share->key_info[idx].option_struct &&
+        table_share->key_info[idx].option_struct->clustering) {
         flags |= HA_CLUSTERED_INDEX;
     }
     DBUG_RETURN(flags);
@@ -1646,7 +1647,7 @@ static int initialize_key_and_col_info(TABLE_SHARE* table_share, TABLE* table, K
                 }
             }
         }
-        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+        if (i == primary_key || table_share->key_info[i].option_struct->clustering) {
             error = initialize_col_pack_info(kc_info,table_share,i);
             if (error) {
                 goto exit;
@@ -3801,7 +3802,7 @@ void ha_tokudb::test_row_packing(uchar* record, DBT* pk_key, DBT* pk_val) {
         //
         // test key packing of clustering keys
         //
-        if (table->key_info[keynr].flags & HA_CLUSTERING) {
+        if (table->key_info[keynr].option_struct->clustering) {
             error = pack_row(&row, (const uchar *) record, keynr);
             assert(error == 0);
             uchar* tmp_buff = NULL;
@@ -4429,7 +4430,7 @@ void ha_tokudb::set_query_columns(uint keynr) {
         key_index = primary_key;
     }
     else {
-        key_index = (table->key_info[keynr].flags & HA_CLUSTERING ? keynr : primary_key);
+        key_index = (table->key_info[keynr].option_struct->clustering ? keynr : primary_key);
     }
     for (uint i = 0; i < table_share->fields; i++) {
         if (bitmap_is_set(table->read_set,i) ||
@@ -4764,7 +4765,7 @@ int ha_tokudb::read_primary_key(uchar * buf, uint keynr, DBT const *row, DBT con
     //
     // case where we read from secondary table that is not clustered
     //
-    if (keynr != primary_key && !(table->key_info[keynr].flags & HA_CLUSTERING)) {
+    if (keynr != primary_key && !(table->key_info[keynr].option_struct->clustering)) {
         bool has_null;
         //
         // create a DBT that has the same data as row, this is inefficient
@@ -4978,7 +4979,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
         break;
     }
     error = handle_cursor_error(error,HA_ERR_KEY_NOT_FOUND,tokudb_active_index);
-    if (!error && !key_read && tokudb_active_index != primary_key && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING)) {
+    if (!error && !key_read && tokudb_active_index != primary_key && !(table->key_info[tokudb_active_index].option_struct->clustering)) {
         error = read_full_row(buf);
     }
@@ -5383,7 +5384,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // key
     need_val = (this->key_read == 0) &&
                (tokudb_active_index == primary_key ||
-                table->key_info[tokudb_active_index].flags & HA_CLUSTERING
+                table->key_info[tokudb_active_index].option_struct->clustering
                );
     if ((bytes_used_in_range_query_buff - curr_range_query_buff_offset) > 0) {
@@ -5463,7 +5464,7 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare) {
     // main table.
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].option_struct->clustering) ) {
         error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;
@@ -5544,7 +5545,7 @@ int ha_tokudb::index_first(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].option_struct->clustering) ) {
         error = read_full_row(buf);
     }
     trx->stmt_progress.queried++;
@@ -5586,7 +5587,7 @@ int ha_tokudb::index_last(uchar * buf) {
     // still need to get entire contents of the row if operation done on
     // secondary DB and it was NOT a covering index
     //
-    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].flags & HA_CLUSTERING) ) {
+    if (!error && !key_read && (tokudb_active_index != primary_key) && !(table->key_info[tokudb_active_index].option_struct->clustering) ) {
         error = read_full_row(buf);
     }
@@ -6739,7 +6740,7 @@ static uint32_t create_secondary_key_descriptor(
         form->s,
         kc_info,
         keynr,
-        key_info->flags & HA_CLUSTERING
+        key_info->option_struct->clustering
         );
     return ptr - buf;
 }
@@ -7335,7 +7336,7 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
 {
     TOKUDB_DBUG_ENTER("ha_tokudb::keyread_time");
     double ret_val;
-    if ((table->key_info[index].flags & HA_CLUSTERING) || (index == primary_key)) {
+    if ((table->key_info[index].option_struct->clustering) || (index == primary_key)) {
         ret_val = read_time(index, ranges, rows);
         DBUG_RETURN(ret_val);
     }
@@ -7385,7 +7386,7 @@ double ha_tokudb::read_time(
         goto cleanup;
     }
-    is_clustering = (table->key_info[index].flags & HA_CLUSTERING);
+    is_clustering = (table->key_info[index].option_struct->clustering);
     //
@@ -7756,7 +7757,7 @@ int ha_tokudb::tokudb_add_index(
     curr_index = curr_num_DBs;
     *modified_DBs = true;
     for (uint i = 0; i < num_of_keys; i++, curr_index++) {
-        if (key_info[i].flags & HA_CLUSTERING) {
+        if (key_info[i].option_struct->clustering) {
             set_key_filter(
                 &share->kc_info.key_filters[curr_index],
                 &key_info[i],
...
@@ -616,7 +616,7 @@ int ha_tokudb::alter_table_add_or_drop_column(TABLE *altered_table, Alter_inplac
         if (error)
             goto cleanup;
-        if (i == primary_key || table_share->key_info[i].flags & HA_CLUSTERING) {
+        if (i == primary_key || table_share->key_info[i].option_struct->clustering) {
             num_column_extra = fill_row_mutator(
                 column_extra,
                 columns,
@@ -734,7 +734,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(TABLE *altered_table, Alter_in
             break;
         // for all trees that have values, make an update variable offsets message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || (table_share->key_info[i].option_struct->clustering)) {
             uint32_t offset_start = table_share->null_bytes + share->kc_info.mcp_info[i].fixed_field_size;
             uint32_t offset_end = offset_start + share->kc_info.mcp_info[i].len_of_offsets;
             uint32_t number_of_offsets = offset_end - offset_start;
@@ -916,7 +916,7 @@ int ha_tokudb::alter_table_expand_one_column(TABLE *altered_table, Alter_inplace
             break;
         // for all trees that have values, make an expand update message and broadcast it into the tree
-        if (i == primary_key || (table_share->key_info[i].flags & HA_CLUSTERING)) {
+        if (i == primary_key || (table_share->key_info[i].option_struct->clustering)) {
             uint32_t old_offset = alter_table_field_offset(table_share->null_bytes, ctx->table_kc_info, i, expand_field_num);
             uint32_t new_offset = alter_table_field_offset(table_share->null_bytes, ctx->altered_table_kc_info, i, expand_field_num);
             assert(old_offset <= new_offset);
...
@@ -126,7 +126,7 @@ static bool tables_have_same_keys(TABLE* table, TABLE* altered_table, bool print
             retval = false;
             goto cleanup;
         }
-        if (((curr_orig_key->flags & HA_CLUSTERING) == 0) != ((curr_altered_key->flags & HA_CLUSTERING) == 0)) {
+        if (curr_orig_key->option_struct->clustering != curr_altered_key->option_struct->clustering) {
             if (print_error) {
                 sql_print_error(
                     "keys disagree on if they are clustering, %d, %d",
...
@@ -538,7 +538,7 @@ static bool check_point_update(Item *conds, TABLE *table) {
 // Precompute this when the table is opened.
 static bool clustering_keys_exist(TABLE *table) {
     for (uint keynr = 0; keynr < table->s->keys; keynr++) {
-        if (keynr != table->s->primary_key && (table->s->key_info[keynr].flags & HA_CLUSTERING))
+        if (keynr != table->s->primary_key && (table->s->key_info[keynr].option_struct->clustering))
             return true;
     }
     return false;
...
@@ -281,6 +281,11 @@ typedef struct st_tokudb_trx_data {
     bool checkpoint_lock_taken;
 } tokudb_trx_data;

+struct ha_index_option_struct
+{
+  bool clustering;
+};
+
 extern char *tokudb_data_dir;
 extern const char *ha_tokudb_ext;
...
@@ -411,6 +411,12 @@ extern "C" {
 }
 #endif

+ha_create_table_option tokudb_index_options[]=
+{
+  HA_IOPTION_BOOL("clustering", clustering, 0),
+  HA_IOPTION_END
+};
+
 // A flag set if the handlerton is in an initialized, usable state,
 // plus a reader-write lock to protect it without serializing reads.
 // Since we don't have static initializers for the opaque rwlock type,
@@ -494,6 +500,8 @@ static int tokudb_init_func(void *p) {
     tokudb_hton->rollback_by_xid=tokudb_rollback_by_xid;
 #endif

+    tokudb_hton->index_options= tokudb_index_options;
+
     tokudb_hton->panic = tokudb_end;
     tokudb_hton->flush_logs = tokudb_flush_logs;
     tokudb_hton->show_status = tokudb_show_status;
...