Commit f20598b2 authored by Sergei Golubchik

TokuDB 7.5.4

@@ -72,18 +72,34 @@ include(CheckCXXCompilerFlag)
 macro(set_cflags_if_supported)
   foreach(flag ${ARGN})
-    check_c_compiler_flag(${flag} HAVE_C_${flag})
-    if (HAVE_C_${flag})
+    string(REGEX REPLACE "-" "_" temp_flag ${flag})
+    check_c_compiler_flag(${flag} HAVE_C_${temp_flag})
+    if (HAVE_C_${temp_flag})
       set(CMAKE_C_FLAGS "${flag} ${CMAKE_C_FLAGS}")
     endif ()
-    check_cxx_compiler_flag(${flag} HAVE_CXX_${flag})
-    if (HAVE_CXX_${flag})
+    check_cxx_compiler_flag(${flag} HAVE_CXX_${temp_flag})
+    if (HAVE_CXX_${temp_flag})
       set(CMAKE_CXX_FLAGS "${flag} ${CMAKE_CXX_FLAGS}")
     endif ()
   endforeach(flag)
 endmacro(set_cflags_if_supported)
 
+macro(append_cflags_if_supported)
+  foreach(flag ${ARGN})
+    string(REGEX REPLACE "-" "_" temp_flag ${flag})
+    check_c_compiler_flag(${flag} HAVE_C_${temp_flag})
+    if (HAVE_C_${temp_flag})
+      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
+    endif ()
+    check_cxx_compiler_flag(${flag} HAVE_CXX_${temp_flag})
+    if (HAVE_CXX_${temp_flag})
+      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
+    endif ()
+  endforeach(flag)
+endmacro(append_cflags_if_supported)
+
 set_cflags_if_supported(-Wno-missing-field-initializers)
+append_cflags_if_supported(-Wno-vla)
 
 ADD_SUBDIRECTORY(ft-index)
...
 TokuDB
 ======
 
-TokuDB is a high-performance, write optimized, transactional storage engine for MySQL and
-MariaDB.  For more details, see our [product page][products].
+TokuDB is a high-performance, write optimized, transactional storage engine for MySQL,
+MariaDB, and Percona Server.  For more details, see our [product page][products].
 
-This repository contains the MySQL plugin that uses the [TokuFT][tokuft]
-core.
+This repository contains the MySQL plugin that uses the [TokuFT][tokuft] core.
 
 There are also patches to the MySQL and MariaDB kernels, available in our
 forks of [mysql][mysql] and [mariadb][mariadb].
@@ -15,23 +14,30 @@ forks of [mysql][mysql] and [mariadb][mariadb].
 [mysql]: http://github.com/Tokutek/mysql
 [mariadb]: http://github.com/Tokutek/mariadb
 
-Building
+Download
 --------
 
+* [MySQL 5.5 + TokuDB](http://www.tokutek.com/tokudb-for-mysql/download-community/)
+* [MariaDB 5.5 + TokuDB](http://www.tokutek.com/tokudb-for-mysql/download-community/)
+* [MariaDB 10.0 + TokuDB](https://downloads.mariadb.org/)
+* [Percona Server 5.6 + TokuDB](http://www.percona.com/downloads/)
+
+Build
+-----
+
 The `scripts/` directory contains a script that can be used to build a
 working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage
 engine, called `make.mysql.bash`.  This script will download copies of the
 needed source code from github and build everything.
 
-To build MySQL 5.5.39 with TokuDB 7.5.2:
+To build MySQL 5.5.40 with TokuDB 7.5.3:
 
 ```sh
-scripts/make.mysql.bash --mysqlbuild=mysql-5.5.39-tokudb-7.5.2-linux-x86_64
+scripts/make.mysql.bash --mysqlbuild=mysql-5.5.40-tokudb-7.5.3-linux-x86_64
 ```
 
-To build MariaDB 5.5.39 with TokuDB 7.5.2:
+To build MariaDB 5.5.40 with TokuDB 7.5.3:
 
 ```sh
-scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.39-tokudb-7.5.2-linux-x86_64
+scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.40-tokudb-7.5.3-linux-x86_64
 ```
 
 Before you start, make sure you have a C++11-compatible compiler (GCC >=
@@ -54,10 +60,10 @@ scripts/make.mysql.debug.env.bash
 ```
 
-Contributing
-------------
+Contribute
+----------
 
-Please report bugs in TokuDB here on github.
+Please report TokuDB bugs at https://tokutek.atlassian.net/browse/DB.
 
 We have two publicly accessible mailing lists:
@@ -66,7 +72,7 @@ We have two publicly accessible mailing lists:
 - tokudb-dev@googlegroups.com is for discussion of the development of
   TokuDB.
 
-We are also available on IRC on freenode.net, in the #tokutek channel.
+We are on IRC on freenode.net, in the #tokutek channel.
 
 License
@@ -74,7 +80,7 @@ License
 
 TokuDB is available under the GPL version 2.  See [COPYING][copying]
 
-The TokuKV component of TokuDB is available under the GPL version 2, with
+The TokuFT component of TokuDB is available under the GPL version 2, with
 slight modifications.  See [README-TOKUDB][license].
 
 [copying]: http://github.com/Tokutek/tokudb-engine/blob/master/COPYING
...
@@ -585,6 +585,7 @@ static void print_db_txn_struct (void) {
              "uint64_t (*id64) (DB_TXN*)",
              "void (*set_client_id)(DB_TXN *, uint64_t client_id)",
              "uint64_t (*get_client_id)(DB_TXN *)",
+             "bool (*is_prepared)(DB_TXN *)",
              NULL};
     sort_and_dump_fields("db_txn", false, extra);
 }
...
@@ -616,6 +616,7 @@ typedef enum {
     FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS,
     FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS,
     FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,
+    FT_CURSOR_SKIP_DELETED_LEAF_ENTRY,  // how many deleted leaf entries were skipped by a cursor
     FT_STATUS_NUM_ROWS
 } ft_status_entry;
...
@@ -377,6 +377,8 @@ status_init(void)
     STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS,     nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (out-of-bounds)", TOKU_ENGINE_STATUS);
     STATUS_INIT(FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,nullptr, PARCOUNT, "promotion: tried the rightmost leaf shorcut but failed (child reactive)", TOKU_ENGINE_STATUS);
+    STATUS_INIT(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, CURSOR_SKIP_DELETED_LEAF_ENTRY, PARCOUNT, "cursor skipped deleted leaf entries", TOKU_ENGINE_STATUS|TOKU_GLOBAL_STATUS);
 
     ft_status.initialized = true;
 }
 static void status_destroy(void) {
@@ -3378,13 +3380,13 @@ ok: ;
     if (le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) {
         // Provisionally deleted stuff is gone.
         // So we need to scan in the direction to see if we can find something.
-        // Every 100 deleted leaf entries check if the leaf's key is within the search bounds.
-        for (uint n_deleted = 1; ; n_deleted++) {
+        // Every 64 deleted leaf entries check if the leaf's key is within the search bounds.
+        for (uint64_t n_deleted = 1; ; n_deleted++) {
             switch (search->direction) {
             case FT_SEARCH_LEFT:
                 idx++;
-                if (idx >= bn->data_buffer.num_klpairs() ||
-                    ((n_deleted % 64) == 0 && !search_continue(search, key, keylen))) {
+                if (idx >= bn->data_buffer.num_klpairs() || ((n_deleted % 64) == 0 && !search_continue(search, key, keylen))) {
+                    STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
                     if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
                         return TOKUDB_INTERRUPTED;
                     }
@@ -3393,6 +3395,7 @@ ok: ;
                 break;
             case FT_SEARCH_RIGHT:
                 if (idx == 0) {
+                    STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
                     if (ftcursor->interrupt_cb && ftcursor->interrupt_cb(ftcursor->interrupt_cb_extra)) {
                         return TOKUDB_INTERRUPTED;
                     }
@@ -3406,6 +3409,7 @@ ok: ;
             r = bn->data_buffer.fetch_klpair(idx, &le, &keylen, &key);
             assert_zero(r);  // we just validated the index
             if (!le_val_is_del(le, ftcursor->is_snapshot_read, ftcursor->ttxn)) {
+                STATUS_INC(FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, n_deleted);
                 goto got_a_good_value;
             }
         }
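
The skip-accounting above is the whole trick: tombstones are skipped one by one, the search bound is only re-checked every 64 skips to keep the comparator off the hot path, and the skip count is reported once on each exit path. A minimal, self-contained sketch of the same control flow (the names `deleted`, `bound`, and `skip_deleted_forward` are illustrative stand-ins, not TokuFT API):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of the cursor skip loop: deleted[] stands in for the leaf's tombstone
// flags and `bound` for the end of the search range. Same shape as the FT code,
// not the real API.
struct skip_result { bool found; size_t idx; uint64_t skipped; };

static skip_result skip_deleted_forward(const std::vector<bool> &deleted,
                                        size_t idx, size_t bound) {
    for (uint64_t n_deleted = 1; ; n_deleted++) {
        idx++;
        // Bail out when we run off the leaf, or (every 64 skips) when the
        // position is already past the search bound.
        if (idx >= deleted.size() || ((n_deleted % 64) == 0 && idx > bound)) {
            return {false, idx, n_deleted};  // report the skip count on exit
        }
        if (!deleted[idx]) {
            return {true, idx, n_deleted};   // found a live entry
        }
    }
}

int main() {
    std::vector<bool> deleted(200, true);
    deleted[150] = false;                    // one live entry among tombstones
    skip_result r = skip_deleted_forward(deleted, 0, 199);
    assert(r.found && r.idx == 150 && r.skipped == 150);
    return 0;
}
```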
...
@@ -113,12 +113,19 @@ void lock_request::create(void) {
     m_complete_r = 0;
     m_state = state::UNINITIALIZED;
+    m_info = nullptr;
 
     toku_cond_init(&m_wait_cond, nullptr);
+
+    m_start_test_callback = nullptr;
+    m_retry_test_callback = nullptr;
 }
 
 // destroy a lock request.
 void lock_request::destroy(void) {
+    invariant(m_state != state::PENDING);
+    invariant(m_state != state::DESTROYED);
+    m_state = state::DESTROYED;
     toku_destroy_dbt(&m_left_key_copy);
     toku_destroy_dbt(&m_right_key_copy);
     toku_cond_destroy(&m_wait_cond);
@@ -135,7 +142,7 @@ void lock_request::set(locktree *lt, TXNID txnid, const DBT *left_key, const DBT
     toku_destroy_dbt(&m_right_key_copy);
     m_type = lock_type;
     m_state = state::INITIALIZED;
-    m_info = lt->get_lock_request_info();
+    m_info = lt ? lt->get_lock_request_info() : nullptr;
     m_big_txn = big_txn;
 }
@@ -223,15 +230,18 @@ int lock_request::start(void) {
         insert_into_lock_requests();
         if (deadlock_exists(conflicts)) {
             remove_from_lock_requests();
-            complete(DB_LOCK_DEADLOCK);
+            r = DB_LOCK_DEADLOCK;
         }
         toku_mutex_unlock(&m_info->mutex);
-    } else {
+        if (m_start_test_callback) m_start_test_callback();  // test callback
+    }
+
+    if (r != DB_LOCK_NOTGRANTED) {
         complete(r);
     }
 
     conflicts.destroy();
-    return m_state == state::COMPLETE ? m_complete_r : r;
+    return r;
 }
 
 // sleep on the lock request until it becomes resolved or the wait time has elapsed.
@@ -292,8 +302,8 @@ int lock_request::wait(uint64_t wait_time_ms, uint64_t killed_time_ms, int (*kil
 
 // complete this lock request with the given return value
 void lock_request::complete(int complete_r) {
-    m_state = state::COMPLETE;
     m_complete_r = complete_r;
+    m_state = state::COMPLETE;
 }
 
 const DBT *lock_request::get_left_key(void) const {
@@ -331,6 +341,7 @@ int lock_request::retry(void) {
     if (r == 0) {
         remove_from_lock_requests();
         complete(r);
+        if (m_retry_test_callback) m_retry_test_callback();  // test callback
         toku_cond_broadcast(&m_wait_cond);
     }
 
@@ -416,7 +427,8 @@ void lock_request::remove_from_lock_requests(void) {
     uint32_t idx;
     lock_request *request;
     int r = m_info->pending_lock_requests.find_zero<TXNID, find_by_txnid>(m_txnid, &request, &idx);
-    invariant_zero(r && request == this);
+    invariant_zero(r);
+    invariant(request == this);
     r = m_info->pending_lock_requests.delete_at(idx);
     invariant_zero(r);
 }
@@ -432,4 +444,12 @@ int lock_request::find_by_txnid(lock_request * const &request, const TXNID &txni
     }
 }
 
+void lock_request::set_start_test_callback(void (*f)(void)) {
+    m_start_test_callback = f;
+}
+
+void lock_request::set_retry_test_callback(void (*f)(void)) {
+    m_retry_test_callback = f;
+}
+
 } /* namespace toku */
@@ -164,6 +164,8 @@ public:
     // The rest remain pending.
     static void retry_all_lock_requests(locktree *lt);
 
+    void set_start_test_callback(void (*f)(void));
+    void set_retry_test_callback(void (*f)(void));
+
 private:
 
     enum state {
@@ -171,6 +173,7 @@ private:
         INITIALIZED,
         PENDING,
         COMPLETE,
+        DESTROYED,
     };
 
     // The keys for a lock request are stored "unowned" in m_left_key
@@ -236,6 +239,9 @@ private:
 
     static int find_by_txnid(lock_request * const &request, const TXNID &txnid);
 
+    void (*m_start_test_callback)(void);
+    void (*m_retry_test_callback)(void);
+
     friend class lock_request_unit_test;
 };
 ENSURE_POD(lock_request);
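
The new DESTROYED state makes misuse of a lock request fail fast: destroying a PENDING request, or destroying one twice, now trips an invariant instead of racing. A small sketch of that lifecycle guard, with `assert` standing in for TokuFT's `invariant()` and a simplified class in place of `lock_request`:

```cpp
#include <cassert>

// Sketch of the lock_request lifecycle guard added above; the states mirror
// the enum in lock_request.h.
class request_sketch {
public:
    enum class state { UNINITIALIZED, INITIALIZED, PENDING, COMPLETE, DESTROYED };

    void create(void)   { m_state = state::UNINITIALIZED; }
    void set(void)      { m_state = state::INITIALIZED; }
    void start(void)    { m_state = state::PENDING; }
    void complete(void) { m_state = state::COMPLETE; }

    void destroy(void) {
        assert(m_state != state::PENDING);    // never destroy a waiting request
        assert(m_state != state::DESTROYED);  // never destroy twice
        m_state = state::DESTROYED;
    }

private:
    state m_state = state::UNINITIALIZED;
};

int main() {
    request_sketch r;
    r.create();
    r.set();
    r.start();
    r.complete();  // a PENDING request must resolve before destroy()
    r.destroy();   // OK: COMPLETE -> DESTROYED; a second destroy() would assert
    return 0;
}
```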
...
@@ -152,6 +152,7 @@ void locktree::create(locktree_manager *mgr, DICTIONARY_ID dict_id, const compar
 void locktree::destroy(void) {
     invariant(m_reference_count == 0);
+    invariant(m_lock_request_info.pending_lock_requests.size() == 0);
     m_cmp.destroy();
     m_rangetree->destroy();
     toku_free(m_rangetree);
...
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuFT, Tokutek Fractal Tree Indexing Library.
Copyright (C) 2007-2013 Tokutek, Inc.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
granted to you under this License shall terminate as of the date
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
under this License.
*/
#ident "Copyright (c) 2014 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <iostream>
#include "test.h"
#include "locktree.h"
#include "lock_request.h"
// Test FT-633, the data race on the lock request between ::start and ::retry
// This test is non-deterministic. It uses sleeps at 2 critical places to
// expose the data race on the lock request's state.
namespace toku {
struct locker_arg {
locktree *_lt;
TXNID _id;
const DBT *_key;
locker_arg(locktree *lt, TXNID id, const DBT *key) : _lt(lt), _id(id), _key(key) {
}
};
static void locker_callback(void) {
usleep(10000);
}
static void run_locker(locktree *lt, TXNID txnid, const DBT *key) {
int i;
for (i = 0; i < 1000; i++) {
lock_request request;
request.create();
request.set(lt, txnid, key, key, lock_request::type::WRITE, false);
// set the test callbacks
request.set_start_test_callback(locker_callback);
request.set_retry_test_callback(locker_callback);
// try to acquire the lock
int r = request.start();
if (r == DB_LOCK_NOTGRANTED) {
// wait for the lock to be granted
r = request.wait(10 * 1000);
}
if (r == 0) {
// release the lock
range_buffer buffer;
buffer.create();
buffer.append(key, key);
lt->release_locks(txnid, &buffer);
buffer.destroy();
// retry pending lock requests
lock_request::retry_all_lock_requests(lt);
}
request.destroy();
memset(&request, 0xab, sizeof request);
toku_pthread_yield();
if ((i % 10) == 0)
std::cout << toku_pthread_self() << " " << i << std::endl;
}
}
static void *locker(void *v_arg) {
locker_arg *arg = static_cast<locker_arg *>(v_arg);
run_locker(arg->_lt, arg->_id, arg->_key);
return arg;
}
} /* namespace toku */
int main(void) {
int r;
toku::locktree lt;
DICTIONARY_ID dict_id = { 1 };
lt.create(nullptr, dict_id, toku::dbt_comparator);
const DBT *one = toku::get_dbt(1);
const int n_workers = 2;
toku_pthread_t ids[n_workers];
for (int i = 0; i < n_workers; i++) {
toku::locker_arg *arg = new toku::locker_arg(&lt, i, one);
r = toku_pthread_create(&ids[i], nullptr, toku::locker, arg);
assert_zero(r);
}
for (int i = 0; i < n_workers; i++) {
void *ret;
r = toku_pthread_join(ids[i], &ret);
assert_zero(r);
toku::locker_arg *arg = static_cast<toku::locker_arg *>(ret);
delete arg;
}
lt.release_reference();
lt.destroy();
return 0;
}
@@ -307,6 +307,7 @@ void toku_db_grab_write_lock (DB *db, DBT *key, TOKUTXN tokutxn) {
     int r = request.start();
     invariant_zero(r);
     db_txn_note_row_lock(db, txn_anc, key, key);
+    request.destroy();
 }
 
 void toku_db_release_lt_key_ranges(DB_TXN *txn, txn_lt_key_ranges *ranges) {
...
@@ -422,6 +422,11 @@ static int toku_txn_discard(DB_TXN *txn, uint32_t flags) {
     return 0;
 }
 
+static bool toku_txn_is_prepared(DB_TXN *txn) {
+    TOKUTXN ttxn = db_txn_struct_i(txn)->tokutxn;
+    return toku_txn_get_state(ttxn) == TOKUTXN_PREPARING;
+}
+
 static inline void txn_func_init(DB_TXN *txn) {
 #define STXN(name) txn->name = locked_txn_ ## name
     STXN(abort);
@@ -438,6 +443,7 @@ static inline void txn_func_init(DB_TXN *txn) {
     SUTXN(discard);
 #undef SUTXN
     txn->id64 = toku_txn_id64;
+    txn->is_prepared = toku_txn_is_prepared;
 }
 
 //
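
DB_TXN methods are plain C function pointers filled in by `txn_func_init()`, so `is_prepared` is wired exactly like `id64`. A hedged sketch of that vtable pattern, with simplified stand-in types rather than the real ydb structures:

```cpp
#include <cassert>

// Sketch of the C-style vtable pattern DB_TXN uses: methods are function
// pointers assigned at init time. Types here are stand-ins, not ydb's.
enum txn_state { TXN_LIVE, TXN_PREPARING, TXN_COMMITTING };

struct db_txn_sketch {
    txn_state state;
    bool (*is_prepared)(db_txn_sketch *txn);  // mirrors txn->is_prepared
};

static bool txn_is_prepared(db_txn_sketch *txn) {
    return txn->state == TXN_PREPARING;       // same check as toku_txn_is_prepared
}

static void txn_func_init(db_txn_sketch *txn) {
    txn->is_prepared = txn_is_prepared;       // wire the hook, like ydb_txn.cc
}

int main() {
    db_txn_sketch txn = { TXN_PREPARING, nullptr };
    txn_func_init(&txn);
    assert(txn.is_prepared(&txn));            // callers go through the pointer
    return 0;
}
```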
...
@@ -1742,7 +1742,7 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
     // initialize cardinality info from the status dictionary
     share->n_rec_per_key = tokudb::compute_total_key_parts(table_share);
-    share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE));
+    share->rec_per_key = (uint64_t *) tokudb_my_realloc(share->rec_per_key, share->n_rec_per_key * sizeof (uint64_t), MYF(MY_FAE + MY_ALLOW_ZERO_PTR));
     error = tokudb::get_card_from_status(share->status_block, txn, share->n_rec_per_key, share->rec_per_key);
     if (error) {
         for (uint i = 0; i < share->n_rec_per_key; i++)
@@ -5995,6 +5995,9 @@ int ha_tokudb::extra(enum ha_extra_function operation) {
     case HA_EXTRA_NO_IGNORE_NO_KEY:
         using_ignore_no_key = false;
         break;
+    case HA_EXTRA_NOT_USED:
+    case HA_EXTRA_PREPARE_FOR_RENAME:
+        break;  // must do nothing and return 0
     default:
         break;
     }
@@ -6240,7 +6243,11 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
     int error = 0;
 
     tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
-    DBUG_ASSERT(trx);
+    if (!trx) {
+        error = create_tokudb_trx_data_instance(&trx);
+        if (error) { goto cleanup; }
+        thd_set_ha_data(thd, tokudb_hton, trx);
+    }
 
     /*
        note that trx->stmt may have been already initialized as start_stmt()
...
@@ -765,7 +765,9 @@ bool ha_tokudb::commit_inplace_alter_table(TABLE *altered_table, Alter_inplace_i
 #else
     THD::killed_state saved_killed_state = thd->killed;
     thd->killed = THD::NOT_KILLED;
-    for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED); i++) {
+    // MySQL does not handle HA_EXTRA_NOT_USED so we use HA_EXTRA_PREPARE_FOR_RENAME since it is passed through
+    // the partition storage engine and is treated as a NOP by tokudb
+    for (volatile uint i = 0; wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME); i++) {
         if (thd->killed != THD::NOT_KILLED)
             thd->killed = THD::NOT_KILLED;
         sleep(1);
...
@@ -425,10 +425,10 @@ static int tokudb_init_func(void *p) {
     tokudb_hton->commit = tokudb_commit;
     tokudb_hton->rollback = tokudb_rollback;
 #if TOKU_INCLUDE_XA
-    tokudb_hton->prepare=tokudb_xa_prepare;
-    tokudb_hton->recover=tokudb_xa_recover;
-    tokudb_hton->commit_by_xid=tokudb_commit_by_xid;
-    tokudb_hton->rollback_by_xid=tokudb_rollback_by_xid;
+    tokudb_hton->prepare = tokudb_xa_prepare;
+    tokudb_hton->recover = tokudb_xa_recover;
+    tokudb_hton->commit_by_xid = tokudb_commit_by_xid;
+    tokudb_hton->rollback_by_xid = tokudb_rollback_by_xid;
 #endif
     tokudb_hton->table_options= tokudb_table_options;
@@ -785,16 +785,35 @@ static void tokudb_cleanup_handlers(tokudb_trx_data *trx, DB_TXN *txn) {
     }
 }
 
+#if MYSQL_VERSION_ID >= 50600
+extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
+#endif
+
+// Determine if an fsync is used when a transaction is committed.
+static bool tokudb_fsync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
+#if MYSQL_VERSION_ID >= 50600
+    // Check the client durability property which is set during 2PC
+    if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
+        return false;
+#endif
+#if defined(MARIADB_BASE_VERSION)
+    // Check if the txn is prepared and the binlog is open
+    if (txn->is_prepared(txn) && mysql_bin_log.is_open())
+        return false;
+#endif
+    return THDVAR(thd, commit_sync) != 0;
+}
+
 static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
     TOKUDB_DBUG_ENTER("");
     DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt"));
-    uint32_t syncflag = THDVAR(thd, commit_sync) ? 0 : DB_TXN_NOSYNC;
     tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
     DB_TXN **txn = all ? &trx->all : &trx->stmt;
     DB_TXN *this_txn = *txn;
     if (this_txn) {
+        uint32_t syncflag = tokudb_fsync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
         if (tokudb_debug & TOKUDB_DEBUG_TXN) {
-            TOKUDB_TRACE("commit trx %u txn %p", all, this_txn);
+            TOKUDB_TRACE("commit trx %u txn %p syncflag %u", all, this_txn, syncflag);
         }
         // test hook to induce a crash on a debug build
         DBUG_EXECUTE_IF("tokudb_crash_commit_before", DBUG_SUICIDE(););
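
The net effect of `tokudb_fsync_on_commit()` is a per-commit choice between a durable and a non-durable commit flag. A sketch of that decision, with plain booleans standing in for the THD/handlerton plumbing and `DB_TXN_NOSYNC_SKETCH` as a stand-in for the real flag:

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the commit-durability decision above. The three inputs stand in
// for thd_get_durability_property(), txn->is_prepared() plus binlog state,
// and the tokudb_commit_sync session variable.
static const uint32_t DB_TXN_NOSYNC_SKETCH = 0x1;

static bool fsync_on_commit(bool durability_ignored,   // 2PC: binlog group commit fsyncs
                            bool prepared_with_binlog, // MariaDB: binlog does the fsync
                            bool commit_sync) {        // user-requested durability
    if (durability_ignored)
        return false;
    if (prepared_with_binlog)
        return false;
    return commit_sync;
}

int main() {
    // During binlog 2PC the engine skips its own fsync, even with commit_sync on.
    uint32_t syncflag = fsync_on_commit(true, false, true) ? 0 : DB_TXN_NOSYNC_SKETCH;
    assert(syncflag == DB_TXN_NOSYNC_SKETCH);

    // A plain transaction with commit_sync on stays durable.
    syncflag = fsync_on_commit(false, false, true) ? 0 : DB_TXN_NOSYNC_SKETCH;
    assert(syncflag == 0);
    return 0;
}
```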
@@ -848,7 +867,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
     TOKUDB_DBUG_ENTER("");
     int r = 0;
 
-    /* if support_xa is disable, just return */
+    // if tokudb_support_xa is disabled, just return
     if (!THDVAR(thd, support_xa)) {
         TOKUDB_DBUG_RETURN(r);
     }
@@ -1608,12 +1627,12 @@ static ST_FIELD_INFO tokudb_fractal_tree_info_field_info[] = {
 static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *iname, TABLE *table, THD *thd) {
     int error;
-    DB *db;
     uint64_t bt_num_blocks_allocated;
     uint64_t bt_num_blocks_in_use;
     uint64_t bt_size_allocated;
     uint64_t bt_size_in_use;
 
+    DB *db = NULL;
     error = db_create(&db, db_env, 0);
     if (error) {
         goto exit;
@@ -1625,12 +1644,6 @@ static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *i
     error = db->get_fractal_tree_info64(db,
                                         &bt_num_blocks_allocated, &bt_num_blocks_in_use,
                                         &bt_size_allocated, &bt_size_in_use);
-    {
-        int close_error = db->close(db, 0);
-        if (!error) {
-            error = close_error;
-        }
-    }
     if (error) {
         goto exit;
     }
@@ -1662,6 +1675,11 @@ static int tokudb_report_fractal_tree_info_for_db(const DBT *dname, const DBT *i
     error = schema_table_store_record(thd, table);
 
 exit:
+    if (db) {
+        int close_error = db->close(db, 0);
+        if (error == 0)
+            error = close_error;
+    }
     return error;
 }
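
Moving `db->close()` under the `exit:` label leaves a single cleanup path: every early `goto exit` now closes the handle if one was created, and a close failure is only reported when no earlier error exists. A minimal sketch of the idiom, with a hypothetical resource in place of the `DB*`:

```cpp
#include <cassert>
#include <cstdio>

// Sketch of the single-exit cleanup idiom adopted above: `handle` stands in
// for the DB*, open/use/close for db_create/get_fractal_tree_info64/db->close.
static int use_resource(bool fail_open, bool fail_use) {
    int error = 0;
    FILE *handle = nullptr;

    if (!fail_open)
        handle = tmpfile();
    if (handle == nullptr) {
        error = 1;
        goto exit;              // no handle yet: exit path must tolerate null
    }
    if (fail_use) {
        error = 2;
        goto exit;              // handle open: exit path must close it
    }

exit:
    if (handle) {
        int close_error = fclose(handle) ? 3 : 0;
        if (error == 0)         // keep the first error, as in the diff
            error = close_error;
    }
    return error;
}

int main() {
    assert(use_resource(true, false) == 1);   // open failed, nothing to close
    assert(use_resource(false, true) == 2);   // use failed, handle still closed
    assert(use_resource(false, false) == 0);  // success path closes cleanly
    return 0;
}
```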
...
@@ -6,6 +6,7 @@ Table	Create Table
 t	CREATE TABLE `t` (
   `a` varchar(1) DEFAULT NULL
 ) ENGINE=TokuDB DEFAULT CHARSET=latin1
+INSERT INTO t VALUES (null);
 ALTER TABLE t CHANGE COLUMN a a VARCHAR(2);
 ALTER TABLE t CHANGE COLUMN a a VARCHAR(2);
 ALTER TABLE t CHANGE COLUMN a a VARCHAR(3);
...
@@ -65,6 +65,7 @@ TOKUDB_CHECKPOINT_LONG_BEGIN_COUNT
 TOKUDB_CHECKPOINT_LONG_BEGIN_TIME
 TOKUDB_CHECKPOINT_PERIOD
 TOKUDB_CHECKPOINT_TAKEN
+TOKUDB_CURSOR_SKIP_DELETED_LEAF_ENTRY
 TOKUDB_DB_CLOSES
 TOKUDB_DB_OPENS
 TOKUDB_DB_OPEN_CURRENT
...
@@ -65,8 +65,8 @@ while ($i < $maxq) {
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_on $time_elapsed_off; }
 if (!$verdict) { echo index $time_elapsed_on $time_elapsed_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_on $time_elapsed_off; }
 if (!$verdict) { echo range $time_elapsed_on $time_elapsed_off; }
...
@@ -65,8 +65,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -93,8 +93,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
...
@@ -72,8 +72,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -100,8 +100,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
...
@@ -65,8 +65,8 @@ while ($i < $maxq) {
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_on $time_elapsed_off; }
 if (!$verdict) { echo index $time_elapsed_on $time_elapsed_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# Check that the time with bulk fetch off is at least twice that whith bulk fetch on
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# Check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_on $time_elapsed_off; }
 if (!$verdict) { echo range $time_elapsed_on $time_elapsed_off; }
...
-# Verify that index scans for delete statements use bulk fetch and are
-# at least twice as fast
+# Verify that index scans for delete statements that use bulk fetch are faster
+# than when not using bulk fetch
 source include/have_tokudb.inc;
 source include/big_test.inc;
@@ -62,8 +61,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_bf_off = `select unix_timestamp() - $s`;
 
-# verify that a delete scan with bulk fetch ON is at least 2 times faster than with bulk fetch OFF
-let $verdict = `select $time_elapsed_bf_on > 0 && $time_elapsed_bf_off >= 2 * $time_elapsed_bf_on`;
+# verify that a delete scan with bulk fetch OFF takes longer than with bulk fetch ON
+let $verdict = `select $time_elapsed_bf_on > 0 && $time_elapsed_bf_off > $time_elapsed_bf_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_bf_on $time_elapsed_bf_off; }
 if (!$verdict) { echo range $time_elapsed_bf_on $time_elapsed_bf_off; }
...
@@ -66,8 +66,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -92,7 +92,7 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
...
@@ -70,8 +70,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -96,8 +96,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
...
@@ -66,8 +66,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-# check that bulk fetch on is at least 2 times faster than bulk fetch off
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
@@ -92,7 +92,8 @@ while ($i < $maxq) {
 }
 let $time_elapsed_off = `select unix_timestamp() - $s`;
 
-let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
+# check that the time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off > $time_elapsed_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
 if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
...
@@ -65,8 +65,8 @@ let $time_bf_off = `select unix_timestamp() - $s`;
 if ($debug) { echo index scans took $time_bf_off.; }
 
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_bf_on $time_bf_off; }
 if (!$verdict) { echo index $time_bf_on $time_bf_off; }
@@ -93,8 +93,8 @@ let $time_bf_off = `select unix_timestamp() - $s`;
 if ($debug) { echo range scans took $time_bf_off.; }
 
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_bf_on $time_bf_off; }
 if (!$verdict) { echo range $time_bf_on $time_bf_off; }
...
@@ -70,8 +70,8 @@ while ($i < $maxq) {
 }
 let $time_bf_off = `select unix_timestamp() - $s`;
 
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
 echo $verdict;
 if ($debug) { echo index $verdict $time_bf_on $time_bf_off; }
 if (!$verdict) { echo index scan $time_bf_on $time_bf_off; }
@@ -94,8 +94,8 @@ while ($i < $maxq) {
 }
 let $time_bf_off = `select unix_timestamp() - $s`;
 
-# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
-let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
+# check that the scan time with bulk fetch off is greater than with bulk fetch on
+let $verdict = `select $time_bf_on > 0 && $time_bf_off > $time_bf_on`;
 echo $verdict;
 if ($debug) { echo range $verdict $time_bf_on $time_bf_off; }
 if (!$verdict) { echo range $time_bf_on $time_bf_off; }
...
@@ -9,6 +9,7 @@ SET SESSION TOKUDB_DISABLE_SLOW_ALTER=ON;
 CREATE TABLE t (a VARCHAR(1)) ENGINE=TokuDB;
 SHOW CREATE TABLE t;
+INSERT INTO t VALUES (null);
 
 # 1->1
 let $i=1
...
drop table if exists t1,t2;
create table t1 (x int) engine=innodb;
lock table t1 read;
create temporary table t2 (x int) engine=tokudb;
insert into t2 values (1);
unlock tables;
drop table t1, t2;
...
set default_storage_engine=TokuDB;
drop table if exists t1;
CREATE TABLE t1(c1 INT,c2 CHAR)PARTITION BY KEY(c1) PARTITIONS 5;
insert INTO t1 values(1,1),(2,1),(2,2),(2,3);
ALTER TABLE t1 ADD UNIQUE INDEX i1(c1);
ERROR 23000: Duplicate entry '2' for key 'i1'
drop table t1;
...
set default_storage_engine='tokudb';
drop table if exists t;
create table t (id int primary key);
set autocommit=OFF;
lock tables t write;
optimize table t;
Table Op Msg_type Msg_text
test.t optimize status OK
unlock tables;
drop table t;
...
set default_storage_engine=tokudb;
drop table if exists t1;
CREATE TABLE t1 (a int key, b varchar(32), c varchar(32));
REPLACE t1 SET a = 4;
ALTER TABLE t1 CHANGE COLUMN c c VARCHAR(500);
update t1 set b='hi';
update t1 set c='there';
select * from t1;
a b c
4 hi there
drop table t1;
...
--tokudb-cache-size=1000000000 --innodb-buffer-pool-size=1000000000
...
# test for DB-762 and DB-767
source include/have_tokudb.inc;
source include/have_innodb.inc;
disable_warnings;
drop table if exists t1,t2;
enable_warnings;
create table t1 (x int) engine=innodb;
lock table t1 read;
create temporary table t2 (x int) engine=tokudb;
insert into t2 values (1);
unlock tables;
drop table t1, t2;
...
# reproducer for DB-766
source include/have_tokudb.inc;
source include/have_partition.inc;
set default_storage_engine=TokuDB;
disable_warnings;
drop table if exists t1;
enable_warnings;
CREATE TABLE t1(c1 INT,c2 CHAR)PARTITION BY KEY(c1) PARTITIONS 5;
insert INTO t1 values(1,1),(2,1),(2,2),(2,3);
--error ER_DUP_ENTRY
ALTER TABLE t1 ADD UNIQUE INDEX i1(c1);
drop table t1;
...
# test case for DB-768
source include/have_tokudb.inc;
set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
create table t (id int primary key);
set autocommit=OFF;
lock tables t write;
optimize table t;
unlock tables;
drop table t;
...
# test case for DB-771
source include/have_tokudb.inc;
set default_storage_engine=tokudb;
disable_warnings;
drop table if exists t1;
enable_warnings;
CREATE TABLE t1 (a int key, b varchar(32), c varchar(32));
REPLACE t1 SET a = 4;
ALTER TABLE t1 CHANGE COLUMN c c VARCHAR(500);
update t1 set b='hi';
update t1 set c='there';
select * from t1;
drop table t1;
...
@@ -851,7 +851,7 @@ static int tokudb_expand_variable_offsets(
     DBT new_val; memset(&new_val, 0, sizeof new_val);
 
     if (old_val != NULL) {
-        assert(offset_start + number_of_offsets < old_val->size);
+        assert(offset_start + number_of_offsets <= old_val->size);
 
         // compute the new val from the old val
         uchar *old_val_ptr = (uchar *)old_val->data;
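
The relaxed assert admits a value whose offsets region ends exactly at the end of the old value. A tiny sketch of the off-by-one, assuming the offsets occupy bytes `[offset_start, offset_start + number_of_offsets)` (a simplification of the real row layout):

```cpp
#include <cassert>
#include <cstdint>

// Sketch of the assert fix: if the offsets region is
// [offset_start, offset_start + number_of_offsets), it may legally end exactly
// at `size`. The old `<` rejected that valid case.
static bool offsets_in_bounds(uint32_t offset_start, uint32_t number_of_offsets,
                              uint32_t size) {
    return offset_start + number_of_offsets <= size;  // was `<` before the fix
}

int main() {
    // A value that is nothing but offsets: valid, but the old assert fired.
    assert(offsets_in_bounds(0, 16, 16));
    // One byte short of holding the offsets: still rejected, as it should be.
    assert(!offsets_in_bounds(0, 17, 16));
    return 0;
}
```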
...