Commit 1c739931 authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

refs #5467, undo checkin

git-svn-id: file:///svn/toku/tokudb@47900 c7de825b-a66e-492c-adef-691d508d4ae1
parent 17e0b228
......@@ -324,7 +324,7 @@ public:
int begin_checkpoint();
void add_background_job();
void remove_background_job();
int end_checkpoint(bool aggressive, void (*testcallback_f)(void*), void* testextra);
int end_checkpoint(void (*testcallback_f)(void*), void* testextra);
TOKULOGGER get_logger();
// used during begin_checkpoint
void increment_num_txns();
......@@ -347,7 +347,7 @@ private:
void turn_on_pending_bits();
// private methods for end_checkpoint
void fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs);
void checkpoint_pending_pairs(bool aggressive);
void checkpoint_pending_pairs();
void checkpoint_userdata(CACHEFILE* checkpoint_cfs);
void log_end_checkpoint();
void end_checkpoint_userdata(CACHEFILE* checkpoint_cfs);
......
......@@ -563,7 +563,7 @@ static void cachetable_free_pair(PAIR p) {
// cachetable_remove_pair, we cannot pass in p->cachefile and p->cachefile->fd
// for the first two parameters, as these may be invalid (#5171), so, we
// pass in NULL and -1, dummy values
flush_callback(NULL, -1, key, value, &disk_data, write_extraargs, old_attr, &new_attr, false, false, true, false, false);
flush_callback(NULL, -1, key, value, &disk_data, write_extraargs, old_attr, &new_attr, false, false, true, false);
ctpair_destroy(p);
}
......@@ -613,8 +613,7 @@ static void cachetable_only_write_locked_data(
PAIR p,
bool for_checkpoint,
PAIR_ATTR* new_attr,
bool is_clone,
bool aggressive
bool is_clone
)
{
CACHETABLE_FLUSH_CALLBACK flush_callback = p->flush_callback;
......@@ -649,8 +648,7 @@ static void cachetable_only_write_locked_data(
dowrite,
is_clone ? false : true, // keep_me (only keep if this is not cloned pointer)
for_checkpoint,
is_clone, //is_clone
aggressive
is_clone //is_clone
);
p->disk_data = disk_data;
if (is_clone) {
......@@ -691,7 +689,7 @@ static void cachetable_write_locked_pair(
// there should be no cloned value data
assert(p->cloned_value_data == NULL);
if (p->dirty) {
cachetable_only_write_locked_data(ev, p, for_checkpoint, &new_attr, false, false);
cachetable_only_write_locked_data(ev, p, for_checkpoint, &new_attr, false);
//
// now let's update variables
//
......@@ -926,8 +924,7 @@ static void checkpoint_cloned_pair(void* extra) {
p,
true, //for_checkpoint
&new_attr,
true, //is_clone
false // aggressive
true //is_clone
);
pair_lock(p);
nb_mutex_unlock(&p->disk_nb_mutex);
......@@ -981,7 +978,7 @@ write_locked_pair_for_checkpoint(CACHETABLE ct, PAIR p, bool checkpoint_pending)
// Else release write lock
//
static void
write_pair_for_checkpoint_thread (evictor* ev, PAIR p, bool aggressive)
write_pair_for_checkpoint_thread (evictor* ev, PAIR p)
{
// Grab an exclusive lock on the pair.
// If we grab an expensive lock, then other threads will return
......@@ -1022,8 +1019,7 @@ write_pair_for_checkpoint_thread (evictor* ev, PAIR p, bool aggressive)
p,
true, //for_checkpoint
&attr,
true, //is_clone
aggressive
true //is_clone
);
pair_lock(p);
nb_mutex_unlock(&p->disk_nb_mutex);
......@@ -2450,8 +2446,7 @@ static void cachetable_flush_pair_for_close(void* extra) {
p,
false, // not for a checkpoint, as we assert above
&attr,
false, // not a clone
true // aggressive
false // not a clone
);
p->dirty = CACHETABLE_CLEAN;
bjm_remove_background_job(args->bjm);
......@@ -2829,7 +2824,7 @@ cleanup:
// Mark every dirty node as "pending." ("Pending" means that the node must be
// written to disk before it can be modified.)
int
toku_cachetable_begin_checkpoint (CHECKPOINTER cp) {
toku_cachetable_begin_checkpoint (CHECKPOINTER cp, TOKULOGGER UU(logger)) {
return cp->begin_checkpoint();
}
......@@ -2849,9 +2844,9 @@ int toku_cachetable_get_checkpointing_user_data_status (void) {
// Use end_checkpoint callback to fsync dictionary and log, and to free unused blocks
// Note: If testcallback is null (for testing purposes only), call it after writing dictionary but before writing log
int
toku_cachetable_end_checkpoint(CHECKPOINTER cp, bool aggressive,
toku_cachetable_end_checkpoint(CHECKPOINTER cp, TOKULOGGER UU(logger),
void (*testcallback_f)(void*), void* testextra) {
return cp->end_checkpoint(aggressive, testcallback_f, testextra);
return cp->end_checkpoint(testcallback_f, testextra);
}
TOKULOGGER toku_cachefile_logger (CACHEFILE cf) {
......@@ -4401,17 +4396,12 @@ void checkpointer::remove_background_job() {
bjm_remove_background_job(m_checkpoint_clones_bjm);
}
int checkpointer::end_checkpoint(
bool aggressive,
void (*testcallback_f)(void*),
void* testextra
)
{
int checkpointer::end_checkpoint(void (*testcallback_f)(void*), void* testextra) {
int r = 0;
CACHEFILE *XMALLOC_N(m_checkpoint_num_files, checkpoint_cfs);
this->fill_checkpoint_cfs(checkpoint_cfs);
this->checkpoint_pending_pairs(aggressive);
this->checkpoint_pending_pairs();
this->checkpoint_userdata(checkpoint_cfs);
// For testing purposes only. Dictionary has been fsync-ed to disk but log has not yet been written.
if (testcallback_f) {
......@@ -4440,7 +4430,7 @@ void checkpointer::fill_checkpoint_cfs(CACHEFILE* checkpoint_cfs) {
m_cf_list->read_unlock();
}
void checkpointer::checkpoint_pending_pairs(bool aggressive) {
void checkpointer::checkpoint_pending_pairs() {
PAIR p;
m_list->read_list_lock();
while ((p = m_list->m_pending_head)!=0) {
......@@ -4450,7 +4440,7 @@ void checkpointer::checkpoint_pending_pairs(bool aggressive) {
// if still pending, clear the pending bit and write out the node
pair_lock(p);
m_list->read_list_unlock();
write_pair_for_checkpoint_thread(m_ev, p, aggressive);
write_pair_for_checkpoint_thread(m_ev, p);
pair_unlock(p);
m_list->read_list_lock();
}
......
......@@ -65,11 +65,8 @@ int toku_cachefile_of_iname_in_env (CACHETABLE ct, const char *iname_in_env, CAC
char * toku_cachefile_fname_in_cwd (CACHEFILE cf);
// TODO: #1510 Add comments on how these behave
int toku_cachetable_begin_checkpoint (CHECKPOINTER cp);
// Completes the checkpoint by writing dirty nodes and headers to disk.
// The aggressive variable determines if the checkpointer should aggressively
// use CPU during the writes or not.
int toku_cachetable_end_checkpoint(CHECKPOINTER cp, bool aggressive,
int toku_cachetable_begin_checkpoint (CHECKPOINTER cp, TOKULOGGER);
int toku_cachetable_end_checkpoint(CHECKPOINTER cp, TOKULOGGER logger,
void (*testcallback_f)(void*), void * testextra);
// Shuts down checkpoint thread
......@@ -121,10 +118,9 @@ enum cachetable_dirty {
// When write_me is true, the value should be written to storage.
// When keep_me is false, the value should be freed.
// When for_checkpoint is true, this was a 'pending' write
// When aggressive is true, the callback can feel free to use all the cores it can to complete the flush ASAP
// Returns: 0 if success, otherwise an error number.
// Can access fd (fd is protected by a readlock during call)
typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone, bool aggressive);
typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
// The fetch callback is called when a thread is attempting to get and pin a memory
// object and it is not in the cachetable.
......
......@@ -213,26 +213,6 @@ toku_checkpoint_destroy(void) {
initialized = false;
}
static bool checkpoint_caller_is_aggressive(checkpoint_caller_t caller_id) {
bool retval;
switch (caller_id) {
case SCHEDULED_CHECKPOINT:
case CLIENT_CHECKPOINT:
retval = false;
break;
case TXN_COMMIT_CHECKPOINT:
case STARTUP_CHECKPOINT:
case UPGRADE_CHECKPOINT:
case RECOVERY_CHECKPOINT:
case SHUTDOWN_CHECKPOINT:
retval = true;
break;
default:
abort();
}
return retval;
}
#define SET_CHECKPOINT_FOOTPRINT(x) STATUS_VALUE(CP_FOOTPRINT) = footprint_offset + x
......@@ -261,18 +241,16 @@ toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger,
SET_CHECKPOINT_FOOTPRINT(30);
STATUS_VALUE(CP_TIME_LAST_CHECKPOINT_BEGIN) = time(NULL);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, logger);
toku_ft_open_close_unlock();
multi_operation_checkpoint_unlock();
SET_CHECKPOINT_FOOTPRINT(40);
if (r==0) {
if (callback_f) {
if (callback_f)
callback_f(extra); // callback is called with checkpoint_safe_lock still held
}
bool aggressive = checkpoint_caller_is_aggressive(caller_id);
r = toku_cachetable_end_checkpoint(cp, aggressive, callback2_f, extra2);
r = toku_cachetable_end_checkpoint(cp, logger, callback2_f, extra2);
}
SET_CHECKPOINT_FOOTPRINT(50);
if (r==0 && logger) {
......
......@@ -524,7 +524,7 @@ int toku_serialize_ftnode_to_memory (FTNODE node,
bool in_parallel,
/*out*/ size_t *n_bytes_to_write,
/*out*/ char **bytes_to_write);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint, bool aggressive);
int toku_serialize_ftnode_to(int fd, BLOCKNUM, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint);
int toku_serialize_rollback_log_to (int fd, ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized_log, bool is_serialized,
FT h, bool for_checkpoint);
void toku_serialize_rollback_log_to_memory_uncompressed(ROLLBACK_LOG_NODE log, SERIALIZED_ROLLBACK_LOG_NODE serialized);
......@@ -641,7 +641,7 @@ STAT64INFO_S toku_get_and_clear_basement_stats(FTNODE leafnode);
void toku_evict_bn_from_memory(FTNODE node, int childnum, FT h);
void toku_ft_status_update_pivot_fetch_reason(struct ftnode_fetch_extra *bfe);
extern void toku_ftnode_clone_callback(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
extern void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone, bool aggressive);
extern void toku_ftnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *ftnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool is_clone);
extern int toku_ftnode_fetch_callback (CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM nodename, uint32_t fullhash, void **ftnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs);
extern void toku_ftnode_pe_est_callback(void* ftnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs);
extern int toku_ftnode_pe_callback (void *ftnode_pv, PAIR_ATTR old_attr, PAIR_ATTR* new_attr, void *extraargs);
......
......@@ -717,8 +717,7 @@ void toku_ftnode_flush_callback (
bool write_me,
bool keep_me,
bool for_checkpoint,
bool is_clone,
bool aggressive
bool is_clone
)
{
FT h = (FT) extraargs;
......@@ -732,7 +731,7 @@ void toku_ftnode_flush_callback (
}
if (!h->panic) { // if the brt panicked, stop writing, otherwise try to write it.
toku_assert_entire_node_in_memory(ftnode);
int r = toku_serialize_ftnode_to(fd, ftnode->thisnodename, ftnode, ndd, !is_clone, h, for_checkpoint, aggressive);
int r = toku_serialize_ftnode_to(fd, ftnode->thisnodename, ftnode, ndd, !is_clone, h, for_checkpoint);
assert_zero(r);
ftnode->layout_version_read_from_disk = FT_LAYOUT_VERSION;
}
......
......@@ -861,7 +861,7 @@ toku_serialize_ftnode_to_memory (FTNODE node,
}
int
toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint, bool aggressive) {
toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DATA* ndd, bool do_rebalancing, FT h, bool for_checkpoint) {
size_t n_to_write;
char *compressed_buf = NULL;
......@@ -885,7 +885,7 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
h->h->basementnodesize,
h->h->compression_method,
do_rebalancing,
aggressive, // in_parallel
false, // in_parallel
&n_to_write,
&compressed_buf
);
......
......@@ -26,7 +26,7 @@ rollback_log_destroy(ROLLBACK_LOG_NODE log) {
// On success return nbytes.
void toku_rollback_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname,
void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size,
bool write_me, bool keep_me, bool for_checkpoint, bool is_clone, bool UU(aggressive)) {
bool write_me, bool keep_me, bool for_checkpoint, bool is_clone) {
int r;
ROLLBACK_LOG_NODE log = nullptr;
SERIALIZED_ROLLBACK_LOG_NODE serialized = nullptr;
......
......@@ -11,7 +11,7 @@
#include "cachetable.h"
#include "fttypes.h"
void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone), bool UU(aggressive));
void toku_rollback_flush_callback(CACHEFILE cachefile, int fd, BLOCKNUM logname, void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, bool write_me, bool keep_me, bool for_checkpoint, bool UU(is_clone));
int toku_rollback_fetch_callback(CACHEFILE cachefile, PAIR p, int fd, BLOCKNUM logname, uint32_t fullhash, void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs);
void toku_rollback_pe_est_callback(
void* rollback_v,
......
......@@ -53,7 +53,7 @@ run_test (void) {
// pin 1 and 2
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0);
......@@ -82,7 +82,7 @@ run_test (void) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -43,7 +43,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (check_flush && w) {
dirty_flush_called = true;
......
......@@ -17,7 +17,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); }
......
......@@ -46,7 +46,7 @@ flush (
bool write_me,
bool keep_me,
bool UU(for_checkpoint),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
// printf("f");
......
......@@ -24,7 +24,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -91,7 +91,7 @@ cachetable_test (void) {
// flush will be called only for v1, because v1 is dirty
//
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
......@@ -101,7 +101,7 @@ cachetable_test (void) {
flush_called = false;
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -24,7 +24,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -93,14 +93,14 @@ cachetable_test (void) {
// flush will be called only for v1, because v1 is dirty
//
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
check_me = true;
flush_called = false;
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -26,7 +26,7 @@ static void flush(
bool write_me,
bool keep_me,
bool UU(for_checkpoint),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
//cf = cf; key = key; value = value; extraargs = extraargs;
......
......@@ -280,7 +280,7 @@ void checkpointer_test::test_end_checkpoint() {
assert(pending_pairs == count / 2);
// 5. Call end checkpoint
m_cp.end_checkpoint(false, NULL, NULL);
m_cp.end_checkpoint(NULL, NULL);
pending_pairs = get_number_pending_pairs(m_cp.m_list);
assert(pending_pairs == 0);
......
......@@ -22,7 +22,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -83,13 +83,13 @@ cachetable_test (void) {
cleaner_called = false;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
toku_cleaner_thread_for_test(ct);
assert(cleaner_called);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -22,7 +22,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -83,13 +83,13 @@ cachetable_test (void) {
cleaner_called = false;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
toku_cleaner_thread_for_test(ct);
assert(!cleaner_called);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -39,7 +39,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
PAIR_ATTR *CAST_FROM_VOIDP(expect, e);
if (!keep) {
......
......@@ -26,10 +26,10 @@ cachetable_test (void) {
usleep(4000000);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -22,7 +22,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (check_flush && !keep) {
......
......@@ -21,7 +21,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep,
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(flush_may_occur);
if (!keep) {
......@@ -63,7 +63,7 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
}
......
......@@ -21,7 +21,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep,
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(flush_may_occur);
if (!keep) {
......@@ -63,7 +63,7 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
}
......
......@@ -32,7 +32,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (check_flush && !keep) {
......
......@@ -30,8 +30,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool is_clone,
bool UU(aggressive)
bool is_clone
)
{
if (is_clone) {
......@@ -45,7 +44,7 @@ static void *run_end_checkpoint(void *arg) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
int r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......@@ -78,7 +77,7 @@ cachetable_test (void) {
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
clone_flush_started = false;
......
......@@ -29,7 +29,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
if (is_clone) {
......@@ -73,7 +73,7 @@ cachetable_test (void) {
flush_completed = false;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert_zero(r);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert_zero(r);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
......@@ -93,7 +93,7 @@ cachetable_test (void) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -29,7 +29,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
if (is_clone) {
......@@ -78,7 +78,7 @@ cachetable_test (void) {
flush_completed = false;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert_zero(r);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert_zero(r);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
......@@ -94,7 +94,7 @@ cachetable_test (void) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -27,7 +27,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
}
......@@ -58,7 +58,7 @@ cachetable_test (enum cachetable_dirty dirty, bool cloneable) {
// test that having a pin that passes false for may_modify_value does not stall behind checkpoint
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert_zero(r);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert_zero(r);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_READ, NULL, NULL);
assert(r == 0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
......@@ -75,7 +75,7 @@ cachetable_test (enum cachetable_dirty dirty, bool cloneable) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -29,7 +29,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
if (is_clone) {
......@@ -72,7 +72,7 @@ cachetable_test (void) {
flush_completed = false;
evict_called = false;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert_zero(r);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert_zero(r);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
assert_zero(r);
......@@ -82,7 +82,7 @@ cachetable_test (void) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -23,7 +23,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(expect_full_flush);
sleep(2);
......
......@@ -23,7 +23,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(expect_full_flush);
}
......
......@@ -21,7 +21,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (do_sleep) {
sleep(3);
......
......@@ -18,7 +18,7 @@ flush (CACHEFILE cf __attribute__((__unused__)),
bool write_me __attribute__((__unused__)),
bool keep_me __attribute__((__unused__)),
bool for_checkpoint __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert((long) key.b == size.size);
if (!keep_me) toku_free(v);
......
......@@ -52,7 +52,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool write_me,
bool keep_me,
bool checkpoint_me,
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
int64_t val_to_write = *(int64_t *)v;
......@@ -271,10 +271,10 @@ static void *checkpoints(void *arg) {
//
int r;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -37,7 +37,7 @@ run_test (void) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
......@@ -60,7 +60,7 @@ run_test (void) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -28,7 +28,7 @@ static void flush(
bool write_me,
bool keep_me,
bool UU(for_checkpoint),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
// assert(key == make_blocknum((long)value));
......
......@@ -19,7 +19,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(w == false && v != NULL);
toku_free(v);
......
......@@ -21,7 +21,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(w == false);
}
......
......@@ -26,7 +26,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w,
bool keep,
bool f_ckpt __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(w == false);
sleep(1);
......
......@@ -22,7 +22,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(w == false);
}
......
......@@ -55,7 +55,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool write_me,
bool keep_me,
bool checkpoint_me,
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
int64_t val_to_write = *(int64_t *)v;
size_t data_index = (size_t)k.b;
......@@ -400,10 +400,10 @@ static void *checkpoints(void *arg) {
//
int r;
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -22,7 +22,7 @@ static void f_flush (CACHEFILE f,
bool write_me,
bool keep_me,
bool for_checkpoint __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
assert(size.size==BLOCKSIZE);
if (write_me) {
......
......@@ -32,7 +32,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
if (w) usleep(5*1024*1024);
......@@ -86,7 +86,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
// begin checkpoint, since pair is clean, we should not
// have the clone called
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
struct timeval tstart;
struct timeval tend;
......@@ -132,7 +132,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -32,7 +32,7 @@ flush (
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
)
{
if (w && check_flush) {
......@@ -70,7 +70,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
// begin checkpoint, since pair is clean, we should not
// have the clone called
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
......@@ -79,7 +79,7 @@ test_clean (enum cachetable_dirty dirty, bool cloneable) {
usleep(2*1024*1024);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -52,7 +52,7 @@ cachetable_test (void) {
assert(r==0);
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
// now these should fail, because the node should be pending a checkpoint
r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
assert(r==-1);
......@@ -60,7 +60,7 @@ cachetable_test (void) {
assert(r==-1);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -25,7 +25,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -107,7 +107,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
//
// should mark the v1 and v2 as pending
//
r = toku_cachetable_begin_checkpoint(cp); assert(r==0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r==0);
}
//
// This call should cause a flush for both
......@@ -147,7 +147,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
if (start_checkpoint) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -17,7 +17,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (w) {
assert(c);
......
......@@ -24,7 +24,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -109,12 +109,12 @@ run_test (void) {
r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0);
// this should mark the PAIR as pending
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, PL_WRITE_EXPENSIVE, NULL, NULL);
assert(r==TOKUDB_TRY_AGAIN);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -26,7 +26,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -81,7 +81,7 @@ run_test (void) {
// now this should mark the pair for checkpoint
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
//
// now we pin the pair again, and verify in flush callback that the pair is being checkpointed
......@@ -96,7 +96,7 @@ run_test (void) {
check_me = false;
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -25,7 +25,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
/* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
......@@ -112,7 +112,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
//
// should mark the v1 and v2 as pending
//
r = toku_cachetable_begin_checkpoint(cp); assert(r==0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r==0);
}
//
// This call should cause a flush for both
......@@ -156,7 +156,7 @@ cachetable_test (bool write_first, bool write_second, bool start_checkpoint) {
if (start_checkpoint) {
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -43,11 +43,11 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp); assert(r == 0);
r = toku_cachetable_begin_checkpoint(cp, NULL); assert(r == 0);
r = toku_test_cachetable_unpin_and_remove(f1, make_blocknum(1), remove_key_expect_checkpoint, NULL);
r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......
......@@ -81,7 +81,7 @@ static void flush_n (CACHEFILE f __attribute__((__unused__)), int UU(fd), CACHEK
PAIR_ATTR* new_size __attribute__((__unused__)),
bool write_me __attribute__((__unused__)), bool keep_me __attribute__((__unused__)),
bool for_checkpoint __attribute__ ((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
int *CAST_FROM_VOIDP(v, value);
assert(*v==0);
......@@ -156,7 +156,7 @@ static void null_flush (CACHEFILE cf __attribute__((__unused__)),
bool write_me __attribute__((__unused__)),
bool keep_me __attribute__((__unused__)),
bool for_checkpoint __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
}
......@@ -240,7 +240,7 @@ static void test_dirty_flush(CACHEFILE f,
bool do_write,
bool keep,
bool for_checkpoint __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (verbose) printf("test_dirty_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
}
......@@ -374,7 +374,7 @@ static void test_size_flush_callback(CACHEFILE f,
bool do_write,
bool keep,
bool for_checkpoint __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (test_size_debug && verbose) printf("test_size_flush %p %" PRId64 " %p %ld %u %u\n", f, key.b, value, size.size, (unsigned)do_write, (unsigned)keep);
if (keep) {
......
......@@ -17,7 +17,7 @@ static void *run_end_chkpt(void *arg) {
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
int r = toku_cachetable_end_checkpoint(
cp,
false,
NULL,
NULL,
NULL
);
......@@ -53,7 +53,7 @@ run_test (void) {
// now this should mark the pair for checkpoint
CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
r = toku_cachetable_begin_checkpoint(cp);
r = toku_cachetable_begin_checkpoint(cp, NULL);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
toku_pthread_t mytid;
......
......@@ -22,7 +22,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
if (w) {
int curr_size = __sync_fetch_and_sub(&total_size, 1);
......
......@@ -357,7 +357,7 @@ test_prefetching(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test_prefetch_read(fd, brt, brt_h);
......
......@@ -302,7 +302,7 @@ test_serialize_nonleaf(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test1(fd, brt_h, &dn);
......@@ -390,7 +390,7 @@ test_serialize_leaf(void) {
assert(size == 100);
}
FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
test1(fd, brt_h, &dn);
......
......@@ -139,7 +139,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
struct timeval t[2];
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, &ndd, true, brt->ft, false);
assert(r==0);
gettimeofday(&t[1], NULL);
double dt;
......@@ -269,7 +269,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
struct timeval t[2];
gettimeofday(&t[0], NULL);
FTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), &sn, &ndd, true, brt->ft, false);
assert(r==0);
gettimeofday(&t[1], NULL);
double dt;
......
......@@ -191,12 +191,12 @@ static void write_sn_to_disk(int fd, FT_HANDLE brt, FTNODE sn, FTNODE_DISK_DATA*
PAIR_ATTR attr;
toku_ftnode_clone_callback(sn, &cloned_node_v, &attr, false, brt->ft);
FTNODE CAST_FROM_VOIDP(cloned_node, cloned_node_v);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, false, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), cloned_node, src_ndd, false, brt->ft, false);
assert(r==0);
toku_ftnode_free(&cloned_node);
}
else {
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, true, brt->ft, false, false);
r = toku_serialize_ftnode_to(fd, make_blocknum(20), sn, src_ndd, true, brt->ft, false);
assert(r==0);
}
}
......
......@@ -127,7 +127,7 @@ def_flush (CACHEFILE f __attribute__((__unused__)),
bool w __attribute__((__unused__)),
bool keep __attribute__((__unused__)),
bool c __attribute__((__unused__)),
bool UU(is_clone), bool UU(aggressive)
bool UU(is_clone)
) {
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment