Commit 1b0261cc authored by Rich Prohaska's avatar Rich Prohaska

try to retain cardinality data after add/drop index

parent e402a3ea
...@@ -55,7 +55,10 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) { ...@@ -55,7 +55,10 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
struct analyze_progress_extra analyze_progress_extra = { struct analyze_progress_extra analyze_progress_extra = {
thd, share, table_share, i, key_name, time(0), write_status_msg thd, share, table_share, i, key_name, time(0), write_status_msg
}; };
int error = tokudb::analyze_card(share->key_file[i], txn, false, num_key_parts, &rec_per_key[next_key_part], bool is_unique = false;
if (i == primary_key || (key_info->flags & HA_NOSAME))
is_unique = true;
int error = tokudb::analyze_card(share->key_file[i], txn, is_unique, num_key_parts, &rec_per_key[next_key_part],
tokudb_cmp_dbt_key_parts, analyze_progress, &analyze_progress_extra); tokudb_cmp_dbt_key_parts, analyze_progress, &analyze_progress_extra);
if (error != 0 && error != ETIME) { if (error != 0 && error != ETIME) {
result = HA_ADMIN_FAILED; result = HA_ADMIN_FAILED;
......
...@@ -422,7 +422,7 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h ...@@ -422,7 +422,7 @@ int ha_tokudb::alter_table_add_index(TABLE *altered_table, Alter_inplace_info *h
my_free(key_info); my_free(key_info);
if (error == 0) if (error == 0)
tokudb::delete_card_from_status(share->status_block, ctx->alter_txn); tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
return error; return error;
} }
...@@ -469,7 +469,7 @@ int ha_tokudb::alter_table_drop_index(TABLE *altered_table, Alter_inplace_info * ...@@ -469,7 +469,7 @@ int ha_tokudb::alter_table_drop_index(TABLE *altered_table, Alter_inplace_info *
int error = drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count, key_info, ctx->alter_txn); int error = drop_indexes(table, index_drop_offsets, ha_alter_info->index_drop_count, key_info, ctx->alter_txn);
if (error == 0) if (error == 0)
tokudb::delete_card_from_status(share->status_block, ctx->alter_txn); tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
return error; return error;
} }
......
...@@ -4,9 +4,9 @@ CHECKS = $(patsubst %,%.check,$(TARGETS)) ...@@ -4,9 +4,9 @@ CHECKS = $(patsubst %,%.check,$(TARGETS))
CPPFLAGS = -I.. -D__STDC_FORMAT_MACROS CPPFLAGS = -I.. -D__STDC_FORMAT_MACROS
CXXFLAGS = -g -Wall -Wextra -Wno-missing-field-initializers -Wshadow CXXFLAGS = -g -Wall -Wextra -Wno-missing-field-initializers -Wshadow
FRACTALTREE_BASE_DIR = ../../../../tokudb FRACTALTREE_BASE_DIR = ../ft-index
FRACTALTREE_DIR = $(FRACTALTREE_BASE_DIR)/release FRACTALTREE_INSTALL_DIR = $(FRACTALTREE_BASE_DIR)/install.debug
VALGRIND = valgrind -q --leak-check=full --show-reachable=yes --suppressions=$(FRACTALTREE_BASE_DIR)/build.debug/valgrind.suppressions --soname-synonyms=somalloc=*tokuportability* VALGRIND = valgrind -q --leak-check=full --show-reachable=yes --suppressions=$(FRACTALTREE_BASE_DIR)/ft/valgrind.suppressions --soname-synonyms=somalloc=*tokuportability*
ifeq ($(GCOV),1) ifeq ($(GCOV),1)
CXXFLAGS += -fprofile-arcs -ftest-coverage CXXFLAGS += -fprofile-arcs -ftest-coverage
...@@ -21,7 +21,7 @@ check: $(CHECKS) ...@@ -21,7 +21,7 @@ check: $(CHECKS)
true true
%.check: % %.check: %
LD_LIBRARY_PATH=$(FRACTALTREE_DIR)/lib $(VALGRIND) ./$< LD_LIBRARY_PATH=$(FRACTALTREE_INSTALL_DIR)/lib $(VALGRIND) ./$<
card.check: card_test.check card_1.check card_inf.check card_inf_1.check card_random_1.check card_etime.check card.check: card_test.check card_1.check card_inf.check card_inf_1.check card_random_1.check card_etime.check
true true
...@@ -33,4 +33,4 @@ max_test.check: max_test ...@@ -33,4 +33,4 @@ max_test.check: max_test
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -g -o $@ $< $(CXX) $(CPPFLAGS) $(CXXFLAGS) -g -o $@ $<
card_%: card_%.cc card_%: card_%.cc
$(CXX) $(CPPFLAGS) $(CXXFLAGS) -g -o $@ $< -I.. -I$(FRACTALTREE_DIR)/include -L$(FRACTALTREE_DIR)/lib -ltokudb -ltokuportability $(CXX) $(CPPFLAGS) $(CXXFLAGS) -g -o $@ $< -I.. -I$(FRACTALTREE_INSTALL_DIR)/include -L$(FRACTALTREE_INSTALL_DIR)/lib -ltokufractaltree -ltokuportability
...@@ -19,27 +19,9 @@ ...@@ -19,27 +19,9 @@
typedef unsigned long long ulonglong; typedef unsigned long long ulonglong;
#include "tokudb_status.h" #include "tokudb_status.h"
#include "tokudb_buffer.h" #include "tokudb_buffer.h"
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions
class KEY_INFO { #include "fake_mysql.h"
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
...@@ -111,6 +93,11 @@ static void test_card(DB_ENV *env, DB *db, uint64_t expect_card) { ...@@ -111,6 +93,11 @@ static void test_card(DB_ENV *env, DB *db, uint64_t expect_card) {
assert(rec_per_key[0] == expect_card); assert(rec_per_key[0] == expect_card);
r = tokudb::analyze_card(db, txn, true, num_key_parts, rec_per_key, analyze_key_compare, NULL, NULL);
assert(r == 0);
assert(rec_per_key[0] == expect_card);
r = txn->commit(txn, 0); r = txn->commit(txn, 0);
assert(r == 0); assert(r == 0);
} }
......
...@@ -20,27 +20,7 @@ ...@@ -20,27 +20,7 @@
typedef unsigned long long ulonglong; typedef unsigned long long ulonglong;
#include "tokudb_status.h" #include "tokudb_status.h"
#include "tokudb_buffer.h" #include "tokudb_buffer.h"
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions #include "fake_mysql.h"
class KEY_INFO {
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
......
...@@ -18,27 +18,7 @@ ...@@ -18,27 +18,7 @@
typedef unsigned long long ulonglong; typedef unsigned long long ulonglong;
#include "tokudb_status.h" #include "tokudb_status.h"
#include "tokudb_buffer.h" #include "tokudb_buffer.h"
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions #include "fake_mysql.h"
class KEY_INFO {
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
......
...@@ -18,27 +18,7 @@ ...@@ -18,27 +18,7 @@
typedef unsigned long long ulonglong; typedef unsigned long long ulonglong;
#include "tokudb_status.h" #include "tokudb_status.h"
#include "tokudb_buffer.h" #include "tokudb_buffer.h"
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions #include "fake_mysql.h"
class KEY_INFO {
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
......
...@@ -19,27 +19,7 @@ ...@@ -19,27 +19,7 @@
typedef unsigned long long ulonglong; typedef unsigned long long ulonglong;
#include "tokudb_status.h" #include "tokudb_status.h"
#include "tokudb_buffer.h" #include "tokudb_buffer.h"
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions #include "fake_mysql.h"
class KEY_INFO {
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
......
...@@ -11,27 +11,8 @@ typedef unsigned long long ulonglong; ...@@ -11,27 +11,8 @@ typedef unsigned long long ulonglong;
#include <tokudb_status.h> #include <tokudb_status.h>
#include <tokudb_buffer.h> #include <tokudb_buffer.h>
// Provide some mimimal MySQL classes just to compile the tokudb cardinality functions #include "fake_mysql.h"
class KEY_INFO {
public:
uint flags;
uint64_t *rec_per_key;
};
#define HA_NOSAME 1
class TABLE_SHARE {
public:
uint primary_key;
uint keys;
};
class TABLE {
public:
TABLE_SHARE *s;
KEY_INFO *key_info;
};
uint get_key_parts(KEY_INFO *key_info) {
assert(key_info);
return 0;
}
#if __APPLE__ #if __APPLE__
typedef unsigned long ulong; typedef unsigned long ulong;
#endif #endif
......
// Provide some minimal MySQL classes just to compile the tokudb cardinality functions
// Minimal stand-in for MySQL's KEY struct, exposing only the members the
// tokudb cardinality code touches.
class KEY_INFO {
public:
    uint flags;             // key flags; tested against HA_NOSAME for uniqueness
    uint key_parts;         // number of key parts in this key
    uint64_t *rec_per_key;  // cardinality output: records per key-part prefix
    char *name;             // key name; matched by find_index_of_key
};
#define HA_NOSAME 1
// Minimal stand-in for MySQL's TABLE_SHARE, exposing only the members the
// tokudb cardinality code reads.
class TABLE_SHARE {
public:
    uint primary_key;       // index of the primary key within key_info
    uint keys, key_parts;   // number of keys, and total key parts summed over all keys
    KEY_INFO *key_info;     // per-key metadata array of length keys
};
// Minimal stand-in for MySQL's TABLE: just the share and the key array.
class TABLE {
public:
    TABLE_SHARE *s;         // shared table metadata
    KEY_INFO *key_info;     // per-key metadata array
};
// Return the number of key parts in the given key.
// The previous stub always returned 0 even though KEY_INFO now carries the
// real count in its key_parts member; return the stored value so callers
// see metadata consistent with what set_card_from_status reads directly.
uint get_key_parts(KEY_INFO *key_info) {
    assert(key_info);
    return key_info->key_parts;
}
...@@ -66,6 +66,52 @@ namespace tokudb { ...@@ -66,6 +66,52 @@ namespace tokudb {
assert(error == 0); assert(error == 0);
} }
bool find_index_of_key(const char *key_name, TABLE_SHARE *table_share, uint *index_offset_ptr) {
for (uint i = 0; i < table_share->keys; i++) {
if (strcmp(key_name, table_share->key_info[i].name) == 0) {
*index_offset_ptr = i;
return true;
}
}
return false;
}
void set_card_from_status(DB *status_db, DB_TXN *txn, TABLE_SHARE *table_share, TABLE_SHARE *altered_table_share) {
int error;
// read existing cardinality data from status
uint64_t rec_per_key[table_share->key_parts];
error = get_card_from_status(status_db, txn, table_share->key_parts, rec_per_key);
uint64_t altered_rec_per_key[altered_table_share->key_parts];
for (uint i = 0; i < altered_table_share->key_parts; i++)
altered_rec_per_key[i] = 0;
// compute the beginning of the key offsets
uint orig_key_offset[table_share->keys];
uint orig_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
orig_key_offset[i] = orig_key_parts;
orig_key_parts += table_share->key_info[i].key_parts;
}
// if orig card data exists, then use it to compute new card data
if (error == 0) {
uint key_parts = 0;
for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
uint orig_key_index;
if (find_index_of_key(altered_table_share->key_info[i].name, table_share, &orig_key_index)) {
memcpy(&altered_rec_per_key[key_parts], &rec_per_key[orig_key_offset[orig_key_index]], altered_table_share->key_info[i].key_parts);
}
key_parts += altered_table_share->key_info[i].key_parts;
}
}
if (error == 0)
set_card_in_status(status_db, txn, altered_table_share->key_parts, altered_rec_per_key);
else
delete_card_from_status(status_db, txn);
}
// Compute records per key for all key parts of the ith key of the table. // Compute records per key for all key parts of the ith key of the table.
// For each key part, put records per key part in *rec_per_key_part[key_part_index]. // For each key part, put records per key part in *rec_per_key_part[key_part_index].
// Returns 0 if success, otherwise an error number. // Returns 0 if success, otherwise an error number.
...@@ -74,66 +120,70 @@ namespace tokudb { ...@@ -74,66 +120,70 @@ namespace tokudb {
int (*key_compare)(DB *, const DBT *, const DBT *, uint), int (*key_compare)(DB *, const DBT *, const DBT *, uint),
int (*analyze_progress)(void *extra, uint64_t rows), void *progress_extra) { int (*analyze_progress)(void *extra, uint64_t rows), void *progress_extra) {
int error = 0; int error = 0;
DBC *cursor = NULL; uint64_t rows = 0;
error = db->cursor(db, txn, &cursor, 0); uint64_t unique_rows[num_key_parts];
if (error == 0) { if (is_unique && num_key_parts == 1) {
uint64_t rows = 0; rows = unique_rows[0] = 1;
uint64_t unique_rows[num_key_parts]; } else {
for (uint64_t i = 0; i < num_key_parts; i++) DBC *cursor = NULL;
unique_rows[i] = 1; error = db->cursor(db, txn, &cursor, 0);
// stop looking when the entire dictionary was analyzed, or a cap on execution time was reached, or the analyze was killed. if (error == 0) {
DBT key = {}; key.flags = DB_DBT_REALLOC; for (uint64_t i = 0; i < num_key_parts; i++)
DBT prev_key = {}; prev_key.flags = DB_DBT_REALLOC; unique_rows[i] = 1;
while (1) { // stop looking when the entire dictionary was analyzed, or a cap on execution time was reached, or the analyze was killed.
error = cursor->c_get(cursor, &key, 0, DB_NEXT); DBT key = {}; key.flags = DB_DBT_REALLOC;
if (error != 0) { DBT prev_key = {}; prev_key.flags = DB_DBT_REALLOC;
if (error == DB_NOTFOUND) while (1) {
error = cursor->c_get(cursor, &key, 0, DB_NEXT);
if (error != 0) {
if (error == DB_NOTFOUND)
error = 0; // eof is not an error error = 0; // eof is not an error
break; break;
} }
rows++; rows++;
// first row is a unique row, otherwise compare with the previous key // first row is a unique row, otherwise compare with the previous key
bool copy_key = false; bool copy_key = false;
if (rows == 1) { if (rows == 1) {
copy_key = true; copy_key = true;
} else { } else {
// compare this key with the previous key. ignore appended PK for SK's. // compare this key with the previous key. ignore appended PK for SK's.
// TODO if a prefix is different, then all larger keys that include the prefix are also different. // TODO if a prefix is different, then all larger keys that include the prefix are also different.
// TODO if we are comparing the entire primary key or the entire unique secondary key, then the cardinality must be 1, // TODO if we are comparing the entire primary key or the entire unique secondary key, then the cardinality must be 1,
// so we can avoid computing it. // so we can avoid computing it.
for (uint64_t i = 0; i < num_key_parts; i++) { for (uint64_t i = 0; i < num_key_parts; i++) {
int cmp = key_compare(db, &prev_key, &key, i+1); int cmp = key_compare(db, &prev_key, &key, i+1);
if (cmp != 0) { if (cmp != 0) {
unique_rows[i]++; unique_rows[i]++;
copy_key = true; copy_key = true;
}
} }
} }
// prev_key = key
if (copy_key) {
prev_key.data = realloc(prev_key.data, key.size);
assert(prev_key.data);
prev_key.size = key.size;
memcpy(prev_key.data, key.data, prev_key.size);
}
// check for limit
if (analyze_progress && (rows % 1000) == 0) {
error = analyze_progress(progress_extra, rows);
if (error)
break;
}
} }
// prev_key = key // cleanup
if (copy_key) { free(key.data);
prev_key.data = realloc(prev_key.data, key.size); free(prev_key.data);
assert(prev_key.data); int close_error = cursor->c_close(cursor);
prev_key.size = key.size; assert(close_error == 0);
memcpy(prev_key.data, key.data, prev_key.size);
}
// check for limit
if (analyze_progress && (rows % 1000) == 0) {
error = analyze_progress(progress_extra, rows);
if (error)
break;
}
}
// cleanup
free(key.data);
free(prev_key.data);
int close_error = cursor->c_close(cursor);
assert(close_error == 0);
// return cardinality
if (error == 0 || error == ETIME) {
for (uint64_t i = 0; i < num_key_parts; i++)
rec_per_key_part[i] = rows / unique_rows[i];
} }
} }
// return cardinality
if (error == 0 || error == ETIME) {
for (uint64_t i = 0; i < num_key_parts; i++)
rec_per_key_part[i] = rows / unique_rows[i];
}
return error; return error;
} }
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment