Commit c527ba29 authored by Yoni Fogel

refs #5081 Replace all usages of:

BOOL->bool
FALSE->false
TRUE->true
u_int*_t->uint*_t

Also poisoned all of the variables

git-svn-id: file:///svn/toku/tokudb@46157 c7de825b-a66e-492c-adef-691d508d4ae1
parent 36242fd4
......@@ -68,11 +68,11 @@ int if_transactions_do_logging = DB_INIT_LOG; // set this to zero if we want no
int do_abort = 0;
int n_insertions_since_txn_began=0;
int env_open_flags = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL;
u_int32_t put_flags = 0;
uint32_t put_flags = 0;
double compressibility = -1; // -1 means make it very compressible. 1 means use random bits everywhere. 2 means half the bits are random.
int do_append = 0;
int do_checkpoint_period = 0;
u_int32_t checkpoint_period = 0;
uint32_t checkpoint_period = 0;
static const char *log_dir = NULL;
static int commitflags = 0;
static int redzone = 0;
......@@ -221,7 +221,7 @@ static void benchmark_setup (void) {
if (do_checkpoint_period) {
r = dbenv->checkpointing_set_period(dbenv, checkpoint_period);
assert(r == 0);
u_int32_t period;
uint32_t period;
r = dbenv->checkpointing_get_period(dbenv, &period);
assert(r == 0 && period == checkpoint_period);
}
......@@ -231,16 +231,16 @@ static void benchmark_setup (void) {
if (cleaner_period) {
r = dbenv->cleaner_set_period(dbenv, cleaner_period);
assert(r == 0);
u_int32_t period;
uint32_t period;
r = dbenv->cleaner_get_period(dbenv, &period);
assert(r == 0 && period == (u_int32_t)cleaner_period);
assert(r == 0 && period == (uint32_t)cleaner_period);
}
if (cleaner_iterations) {
r = dbenv->cleaner_set_iterations(dbenv, cleaner_iterations);
assert(r == 0);
u_int32_t iterations;
uint32_t iterations;
r = dbenv->cleaner_get_iterations(dbenv, &iterations);
assert(r == 0 && iterations == (u_int32_t)cleaner_iterations);
assert(r == 0 && iterations == (uint32_t)cleaner_iterations);
}
#endif
......@@ -695,7 +695,7 @@ static int test_main (int argc, char *const argv[]) {
} else if (strcmp(arg, "--checkpoint-period") == 0) {
if (i+1 >= argc) return print_usage(argv[0]);
do_checkpoint_period = 1;
checkpoint_period = (u_int32_t) atoi(argv[++i]);
checkpoint_period = (uint32_t) atoi(argv[++i]);
} else if (strcmp(arg, "--nosync") == 0) {
commitflags += DB_TXN_NOSYNC;
} else if (strcmp(arg, "--userandom") == 0) {
......
......@@ -5,7 +5,6 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <db.h>
#include "tokudb_common_funcs.h"
#include <assert.h>
......
......@@ -23,7 +23,7 @@ static const char *dbdir = "./bench." STRINGIFY(DIRSUF); /* DIRSUF is passed in
static int env_open_flags_yesx = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL|DB_INIT_TXN|DB_INIT_LOG|DB_INIT_LOCK|DB_RECOVER|DB_THREAD;
// static int env_open_flags_nox = DB_CREATE|DB_PRIVATE|DB_INIT_MPOOL;
static const char *dbfilename = "bench.db";
static u_int64_t cachesize = 127*1024*1024;
static uint64_t cachesize = 127*1024*1024;
static int nqueries = 1000000;
static int nthreads = 1;
static const char *log_dir = NULL;
......
......@@ -21,7 +21,7 @@
static const char *pname;
static long limitcount=-1;
static u_int32_t cachesize = 16*1024*1024;
static uint32_t cachesize = 16*1024*1024;
#define STRINGIFY2(s) #s
#define STRINGIFY(s) STRINGIFY2(s)
......@@ -41,7 +41,7 @@ static void parse_args (int argc, char *const argv[]) {
} else if (strcmp(*argv, "--cachesize")==0 && argc>0) {
char *end;
argv++; argc--;
cachesize=(u_int32_t)strtol(*argv, &end, 10);
cachesize=(uint32_t)strtol(*argv, &end, 10);
} else if (strcmp(*argv, "--env") == 0) {
argv++; argc--;
if (argc <= 0) goto print_usage;
......
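Note: the --cachesize hunk above passes an end pointer to strtol but never checks it. A minimal sketch of stricter parsing, as a hypothetical helper rather than anything in this commit:

#include <stdint.h>
#include <stdlib.h>

// Parse a non-negative decimal string into a uint32_t; 0 on success, -1 on bad input.
static int parse_u32(const char *s, uint32_t *out) {
    char *end;
    long v = strtol(s, &end, 10);
    if (end == s || *end != '\0' || v < 0 || (unsigned long)v > UINT32_MAX)
        return -1;   // no digits, trailing junk, or out of range
    *out = (uint32_t)v;
    return 0;
}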
......@@ -24,11 +24,11 @@ static const char *pname;
static enum run_mode { RUN_HWC, RUN_LWC, RUN_VERIFY, RUN_RANGE} run_mode = RUN_HWC;
static int do_txns=1, prelock=0, prelockflag=0;
static int cleaner_period=0, cleaner_iterations=0;
static u_int32_t lock_flag = 0;
static uint32_t lock_flag = 0;
static long limitcount=-1;
static u_int32_t cachesize = 127*1024*1024;
static uint32_t cachesize = 127*1024*1024;
static int do_mysql = 0;
static u_int64_t start_range = 0, end_range = 0;
static uint64_t start_range = 0, end_range = 0;
static int n_experiments = 2;
static int bulk_fetch = 1;
......@@ -110,15 +110,15 @@ static void parse_args (int argc, char *const argv[]) {
} else if (strcmp(*argv, "--cachesize")==0 && argc>0) {
char *end;
argc--; argv++;
cachesize=(u_int32_t)strtol(*argv, &end, 10);
cachesize=(uint32_t)strtol(*argv, &end, 10);
} else if (strcmp(*argv, "--cleaner-period")==0 && argc>0) {
char *end;
argc--; argv++;
cleaner_period=(u_int32_t)strtol(*argv, &end, 10);
cleaner_period=(uint32_t)strtol(*argv, &end, 10);
} else if (strcmp(*argv, "--cleaner-iterations")==0 && argc>0) {
char *end;
argc--; argv++;
cleaner_iterations=(u_int32_t)strtol(*argv, &end, 10);
cleaner_iterations=(uint32_t)strtol(*argv, &end, 10);
} else if (strcmp(*argv, "--env") == 0) {
argc--; argv++;
if (argc==0) exit(print_usage(pname));
......@@ -264,7 +264,7 @@ static void scanscan_hwc (void) {
r = db->cursor(db, tid, &dbc, 0); assert(r==0);
memset(&k, 0, sizeof(k));
memset(&v, 0, sizeof(v));
u_int32_t c_get_flags = DB_NEXT;
uint32_t c_get_flags = DB_NEXT;
if (prelockflag && (counter || prelock)) {
c_get_flags |= lock_flag;
}
......@@ -313,7 +313,7 @@ static void scanscan_lwc (void) {
if(prelock) {
r = dbc->c_pre_acquire_range_lock(dbc, db->dbt_neg_infty(), db->dbt_pos_infty()); assert(r==0);
}
u_int32_t f_flags = 0;
uint32_t f_flags = 0;
if (prelockflag && (counter || prelock)) {
f_flags |= lock_flag;
}
......@@ -336,7 +336,7 @@ static void scanscan_range (void) {
int r;
double texperiments[n_experiments];
u_int64_t k = 0;
uint64_t k = 0;
char kv[8];
DBT key, val;
......@@ -432,8 +432,8 @@ static void scanscan_verify (void) {
r = db->cursor(db, tid, &dbc2, 0); assert(r==0);
memset(&v.k, 0, sizeof(v.k));
memset(&v.v, 0, sizeof(v.v));
u_int32_t f_flags = 0;
u_int32_t c_get_flags = DB_NEXT;
uint32_t f_flags = 0;
uint32_t c_get_flags = DB_NEXT;
if (prelockflag && (counter || prelock)) {
f_flags |= lock_flag;
c_get_flags |= lock_flag;
......
......@@ -8,7 +8,6 @@
#include "toku_portability.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
......
......@@ -5,7 +5,6 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <config.h>
#include <stdbool.h>
#include <toku_pthread.h>
#include "kibbutz.h"
#include "background_job_manager.h"
......@@ -13,7 +12,7 @@
struct background_job_manager_struct {
bool accepting_jobs;
u_int32_t num_jobs;
uint32_t num_jobs;
toku_cond_t jobs_wait;
toku_mutex_t jobs_lock;
};
......
......@@ -11,18 +11,18 @@
// Previous implementation used next_fit, but now use first_fit since we are moving blocks around to reduce file size.
struct block_allocator {
u_int64_t reserve_at_beginning; // How much to reserve at the beginning
u_int64_t alignment; // Block alignment
u_int64_t n_blocks; // How many blocks
u_int64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks.
uint64_t reserve_at_beginning; // How much to reserve at the beginning
uint64_t alignment; // Block alignment
uint64_t n_blocks; // How many blocks
uint64_t blocks_array_size; // How big is the blocks_array. Must be >= n_blocks.
struct block_allocator_blockpair *blocks_array; // These blocks are sorted by address.
u_int64_t n_bytes_in_use; // including the reserve_at_beginning
uint64_t n_bytes_in_use; // including the reserve_at_beginning
};
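Note: the comment above motivates the switch from next_fit to first_fit. A minimal sketch of the first-fit policy over a sorted blockpair array, with a simplified standalone type and alignment ignored (not the TokuDB implementation, which appears later in this file):

#include <stdint.h>

struct pair { uint64_t offset, size; };

// Return the offset of the first inter-block gap that can hold size bytes,
// or UINT64_MAX if no interior gap fits (the caller then appends at the end).
static uint64_t first_fit(const struct pair *blocks, uint64_t n, uint64_t size) {
    for (uint64_t i = 0; i + 1 < n; i++) {
        uint64_t gap_start = blocks[i].offset + blocks[i].size;
        if (gap_start + size <= blocks[i + 1].offset)
            return gap_start;   // first gap that fits wins
    }
    return UINT64_MAX;
}

next_fit would resume scanning from the previous allocation point; rescanning from the start packs blocks toward the front of the file, which matches the stated goal of reducing file size.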
void
block_allocator_validate (BLOCK_ALLOCATOR ba) {
u_int64_t i;
u_int64_t n_bytes_in_use = ba->reserve_at_beginning;
uint64_t i;
uint64_t n_bytes_in_use = ba->reserve_at_beginning;
for (i=0; i<ba->n_blocks; i++) {
n_bytes_in_use += ba->blocks_array[i].size;
if (i>0) {
......@@ -42,7 +42,7 @@ block_allocator_validate (BLOCK_ALLOCATOR ba) {
#if 0
void
block_allocator_print (BLOCK_ALLOCATOR ba) {
u_int64_t i;
uint64_t i;
for (i=0; i<ba->n_blocks; i++) {
printf("%" PRId64 ":%" PRId64 " ", ba->blocks_array[i].offset, ba->blocks_array[i].size);
}
......@@ -52,7 +52,7 @@ block_allocator_print (BLOCK_ALLOCATOR ba) {
#endif
void
create_block_allocator (BLOCK_ALLOCATOR *ba, u_int64_t reserve_at_beginning, u_int64_t alignment) {
create_block_allocator (BLOCK_ALLOCATOR *ba, uint64_t reserve_at_beginning, uint64_t alignment) {
BLOCK_ALLOCATOR XMALLOC(result);
result->reserve_at_beginning = reserve_at_beginning;
result->alignment = alignment;
......@@ -73,10 +73,10 @@ destroy_block_allocator (BLOCK_ALLOCATOR *bap) {
}
static void
grow_blocks_array_by (BLOCK_ALLOCATOR ba, u_int64_t n_to_add) {
grow_blocks_array_by (BLOCK_ALLOCATOR ba, uint64_t n_to_add) {
if (ba->n_blocks + n_to_add > ba->blocks_array_size) {
u_int64_t new_size = ba->n_blocks + n_to_add;
u_int64_t at_least = ba->blocks_array_size * 2;
uint64_t new_size = ba->n_blocks + n_to_add;
uint64_t at_least = ba->blocks_array_size * 2;
if (at_least > new_size) {
new_size = at_least;
}
......@@ -92,10 +92,10 @@ grow_blocks_array (BLOCK_ALLOCATOR ba) {
}
void
block_allocator_merge_blockpairs_into (u_int64_t d, struct block_allocator_blockpair dst[/*d*/],
u_int64_t s, const struct block_allocator_blockpair src[/*s*/])
block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
uint64_t s, const struct block_allocator_blockpair src[/*s*/])
{
u_int64_t tail = d+s;
uint64_t tail = d+s;
while (d>0 && s>0) {
struct block_allocator_blockpair *dp = &dst[d-1];
struct block_allocator_blockpair const *sp = &src[s-1];
......@@ -137,12 +137,12 @@ compare_blockpairs (const void *av, const void *bv) {
}
void
block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/])
block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair pairs[/*n_blocks*/])
// See the documentation in block_allocator.h
{
VALIDATE(ba);
qsort(pairs, n_blocks, sizeof(*pairs), compare_blockpairs);
for (u_int64_t i=0; i<n_blocks; i++) {
for (uint64_t i=0; i<n_blocks; i++) {
assert(pairs[i].offset >= ba->reserve_at_beginning);
assert(pairs[i].offset%ba->alignment == 0);
ba->n_bytes_in_use += pairs[i].size;
......@@ -155,7 +155,7 @@ block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct
}
void
block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset) {
block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset) {
struct block_allocator_blockpair p = {.offset = offset, .size=size};
// Just do a linear search for the block.
// This data structure is a sorted array (no gaps or anything), so the search isn't really making this any slower than the insertion.
......@@ -163,15 +163,15 @@ block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t of
block_allocator_alloc_blocks_at(ba, 1, &p);
}
static inline u_int64_t
align (u_int64_t value, BLOCK_ALLOCATOR ba)
static inline uint64_t
align (uint64_t value, BLOCK_ALLOCATOR ba)
// Effect: align a value by rounding up.
{
return ((value+ba->alignment-1)/ba->alignment)*ba->alignment;
}
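Note: a worked example of the rounding-up formula, assuming a hypothetical alignment of 4096 (integer division):

align(1)    = ((1 + 4095) / 4096) * 4096    = 4096
align(4096) = ((4096 + 4095) / 4096) * 4096 = 4096
align(4097) = ((4097 + 4095) / 4096) * 4096 = 8192

Already-aligned values are unchanged; everything else rounds up to the next multiple.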
void
block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offset) {
block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset) {
grow_blocks_array(ba);
ba->n_bytes_in_use += size;
if (ba->n_blocks==0) {
......@@ -184,7 +184,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
}
// Implement first fit.
{
u_int64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
uint64_t end_of_reserve = align(ba->reserve_at_beginning, ba);
if (end_of_reserve + size <= ba->blocks_array[0].offset ) {
// Check to see if the space immediately after the reserve is big enough to hold the new block.
struct block_allocator_blockpair *bp = &ba->blocks_array[0];
......@@ -197,12 +197,12 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
return;
}
}
for (u_int64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
u_int64_t this_offset = bp[0].offset;
u_int64_t this_size = bp[0].size;
u_int64_t answer_offset = align(this_offset + this_size, ba);
uint64_t this_offset = bp[0].offset;
uint64_t this_size = bp[0].size;
uint64_t answer_offset = align(this_offset + this_size, ba);
if (answer_offset + size > bp[1].offset) continue; // The block we want doesn't fit after this block.
// It fits, so allocate it here.
memmove(bp+2, bp+1, (ba->n_blocks - blocknum -1)*sizeof(*bp));
......@@ -216,7 +216,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
// It didn't fit anywhere, so fit it on the end.
assert(ba->n_blocks < ba->blocks_array_size);
struct block_allocator_blockpair *bp = &ba->blocks_array[ba->n_blocks];
u_int64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba);
uint64_t answer_offset = align(bp[-1].offset+bp[-1].size, ba);
bp->offset = answer_offset;
bp->size = size;
ba->n_blocks++;
......@@ -225,7 +225,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
}
static int64_t
find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
find_block (BLOCK_ALLOCATOR ba, uint64_t offset)
// Find the index in the blocks array that has a particular offset. Requires that the block exist.
// Use binary search so it runs fast.
{
......@@ -234,12 +234,12 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
assert(ba->blocks_array[0].offset == offset);
return 0;
}
u_int64_t lo = 0;
u_int64_t hi = ba->n_blocks;
uint64_t lo = 0;
uint64_t hi = ba->n_blocks;
while (1) {
assert(lo<hi); // otherwise no such block exists.
u_int64_t mid = (lo+hi)/2;
u_int64_t thisoff = ba->blocks_array[mid].offset;
uint64_t mid = (lo+hi)/2;
uint64_t thisoff = ba->blocks_array[mid].offset;
//printf("lo=%" PRId64 " hi=%" PRId64 " mid=%" PRId64 " thisoff=%" PRId64 " offset=%" PRId64 "\n", lo, hi, mid, thisoff, offset);
if (thisoff < offset) {
lo = mid+1;
......@@ -252,7 +252,7 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
}
void
block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset) {
block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset) {
VALIDATE(ba);
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
......@@ -262,14 +262,14 @@ block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset) {
VALIDATE(ba);
}
u_int64_t
block_allocator_block_size (BLOCK_ALLOCATOR ba, u_int64_t offset) {
uint64_t
block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset) {
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
return ba->blocks_array[bn].size;
}
u_int64_t
uint64_t
block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) {
if (ba->n_blocks==0) return ba->reserve_at_beginning;
else {
......@@ -279,7 +279,7 @@ block_allocator_allocated_limit (BLOCK_ALLOCATOR ba) {
}
int
block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b, u_int64_t *offset, u_int64_t *size)
block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size)
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is a block with that number; return nonzero if b is too big.
......@@ -324,7 +324,7 @@ block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTATION
}
//Deal with space between blocks:
for (u_int64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
for (uint64_t blocknum = 0; blocknum +1 < ba->n_blocks; blocknum ++) {
// Consider the space after blocknum
struct block_allocator_blockpair *bp = &ba->blocks_array[blocknum];
uint64_t this_offset = bp[0].offset;
......
......@@ -39,7 +39,7 @@
typedef struct block_allocator *BLOCK_ALLOCATOR;
void create_block_allocator (BLOCK_ALLOCATOR * ba, u_int64_t reserve_at_beginning, u_int64_t alignment);
void create_block_allocator (BLOCK_ALLOCATOR * ba, uint64_t reserve_at_beginning, uint64_t alignment);
// Effect: Create a block allocator, in which the first RESERVE_AT_BEGINNING bytes are not put into a block.
// All blocks start on a multiple of ALIGNMENT.
// Aborts if we run out of memory.
......@@ -56,7 +56,7 @@ void destroy_block_allocator (BLOCK_ALLOCATOR *ba);
// ba (IN/OUT):
void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset);
void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t offset);
// Effect: Allocate a block of the specified size at a particular offset.
// Aborts if anything goes wrong.
// The performance of this function may be as bad as Theta(N), where N is the number of blocks currently in use.
......@@ -70,15 +70,15 @@ void block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64
struct block_allocator_blockpair {
u_int64_t offset;
u_int64_t size;
uint64_t offset;
uint64_t size;
};
void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, u_int64_t n_blocks, struct block_allocator_blockpair *pairs);
void block_allocator_alloc_blocks_at (BLOCK_ALLOCATOR ba, uint64_t n_blocks, struct block_allocator_blockpair *pairs);
// Effect: Take pairs in any order, and add them all, as if we did block_allocator_alloc_block() on each pair.
// This should run in time O(N + M log M) where N is the number of blocks in ba, and M is the number of new blocks.
// Modifies: pairs (sorts them).
void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offset);
void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, uint64_t size, uint64_t *offset);
// Effect: Allocate a block of the specified size at an address chosen by the allocator.
// Aborts if anything goes wrong.
// The block address will be a multiple of the alignment.
......@@ -87,7 +87,7 @@ void block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t
// size (IN): The size of the block. (The size does not have to be aligned.)
// offset (OUT): The location of the block.
void block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset);
void block_allocator_free_block (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Free the block at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
......@@ -95,7 +95,7 @@ void block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset);
// offset (IN): The offset of the block.
u_int64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, u_int64_t offset);
uint64_t block_allocator_block_size (BLOCK_ALLOCATOR ba, uint64_t offset);
// Effect: Return the size of the block that starts at offset.
// Requires: There must be a block currently allocated at that offset.
// Parameters:
......@@ -110,14 +110,14 @@ void block_allocator_print (BLOCK_ALLOCATOR ba);
// Effect: Print information about the block allocator.
// Rationale: This is probably useful only for debugging.
u_int64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba);
uint64_t block_allocator_allocated_limit (BLOCK_ALLOCATOR ba);
// Effect: Return the unallocated block address of "infinite" size.
// That is, return the smallest address that is above all the allocated blocks.
// Rationale: When writing the root FIFO we don't know how big the block is.
// So we start at the "infinite" block, write the fifo, and then
// allocate_block_at of the correct size and offset to account for the root FIFO.
int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, u_int64_t b, u_int64_t *offset, u_int64_t *size);
int block_allocator_get_nth_block_in_layout_order (BLOCK_ALLOCATOR ba, uint64_t b, uint64_t *offset, uint64_t *size);
// Effect: Consider the blocks in sorted order. The reserved block at the beginning is number 0. The next one is number 1 and so forth.
// Return the offset and size of the block with that number.
// Return 0 if there is a block with that number; return nonzero if b is too big.
......@@ -130,8 +130,8 @@ void block_allocator_get_unused_statistics(BLOCK_ALLOCATOR ba, TOKU_DB_FRAGMENTA
// report->data_bytes is filled in
// report->checkpoint_bytes_additional is filled in
void block_allocator_merge_blockpairs_into (u_int64_t d, struct block_allocator_blockpair dst[/*d*/],
u_int64_t s, const struct block_allocator_blockpair src[/*s*/]);
void block_allocator_merge_blockpairs_into (uint64_t d, struct block_allocator_blockpair dst[/*d*/],
uint64_t s, const struct block_allocator_blockpair src[/*s*/]);
// Effect: Merge dst[d] and src[s] into dst[d+s], merging in place.
// Initially dst and src hold sorted arrays (sorted by increasing offset).
// Finally dst contains all d+s elements sorted in order.
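Note: a minimal sketch of the in-place backward merge this comment describes, using a simplified pair type rather than struct block_allocator_blockpair:

#include <stdint.h>

struct pair { uint64_t offset, size; };

static void merge_backward(uint64_t d, struct pair dst[/*d+s*/],
                           uint64_t s, const struct pair src[/*s*/]) {
    uint64_t tail = d + s;              // next slot to fill, from the high end
    while (s > 0) {
        if (d > 0 && dst[d-1].offset > src[s-1].offset)
            dst[--tail] = dst[--d];     // largest remaining element is in dst
        else
            dst[--tail] = src[--s];     // ... or in src
    }
    // once src is drained, dst[0..d-1] is already in place
}

Filling from the high end is what makes the in-place merge safe: the write index tail-1 is always at least d, so unmerged dst elements are never overwritten.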
......
......@@ -36,7 +36,7 @@ void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd);
//Blocknums
void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
void toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, BOOL for_checkpoint);
void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, bool for_checkpoint);
void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b);
void toku_block_verify_no_data_blocks_except_root_unlocked(BLOCK_TABLE bt, BLOCKNUM root);
void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt);
......@@ -45,7 +45,7 @@ void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISK
void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size);
//Blocks and Blocknums
void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, BOOL for_checkpoint);
void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, bool for_checkpoint);
void toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size);
//Serialization
......@@ -57,8 +57,8 @@ void toku_block_table_swap_for_redirect(BLOCK_TABLE old_bt, BLOCK_TABLE new_bt);
//DEBUG ONLY (ftdump included), tests included
void toku_blocknum_dump_translation(BLOCK_TABLE bt, BLOCKNUM b);
void toku_dump_translation_table(FILE *f, BLOCK_TABLE bt);
void toku_block_alloc(BLOCK_TABLE bt, u_int64_t size, u_int64_t *offset);
void toku_block_free(BLOCK_TABLE bt, u_int64_t offset);
void toku_block_alloc(BLOCK_TABLE bt, uint64_t size, uint64_t *offset);
void toku_block_free(BLOCK_TABLE bt, uint64_t offset);
typedef int(*BLOCKTABLE_CALLBACK)(BLOCKNUM b, int64_t size, int64_t address, void *extra);
enum translation_type {TRANSLATION_NONE=0,
TRANSLATION_CURRENT,
......@@ -66,7 +66,7 @@ enum translation_type {TRANSLATION_NONE=0,
TRANSLATION_CHECKPOINTED,
TRANSLATION_DEBUG};
int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, BOOL data_only, BOOL used_only);
int toku_blocktable_iterate(BLOCK_TABLE bt, enum translation_type type, BLOCKTABLE_CALLBACK f, void *extra, bool data_only, bool used_only);
void toku_blocktable_internal_fragmentation(BLOCK_TABLE bt, int64_t *total_sizep, int64_t *used_sizep);
void toku_block_table_get_fragmentation_unlocked(BLOCK_TABLE bt, TOKU_DB_FRAGMENTATION report);
......
......@@ -110,9 +110,9 @@ static LSN last_completed_checkpoint_lsn;
static toku_pthread_rwlock_t checkpoint_safe_lock;
static toku_pthread_rwlock_t multi_operation_lock;
static BOOL initialized = FALSE; // sanity check
static volatile BOOL locked_mo = FALSE; // true when the multi_operation write lock is held (by checkpoint)
static volatile BOOL locked_cs = FALSE; // true when the checkpoint_safe write lock is held (by checkpoint)
static bool initialized = false; // sanity check
static volatile bool locked_mo = false; // true when the multi_operation write lock is held (by checkpoint)
static volatile bool locked_cs = false; // true when the checkpoint_safe write lock is held (by checkpoint)
// Note following static functions are called from checkpoint internal logic only,
......@@ -131,7 +131,7 @@ multi_operation_lock_init(void) {
#endif
toku_pthread_rwlock_init(&multi_operation_lock, &attr);
pthread_rwlockattr_destroy(&attr);
locked_mo = FALSE;
locked_mo = false;
}
static void
......@@ -142,19 +142,19 @@ multi_operation_lock_destroy(void) {
static void
multi_operation_checkpoint_lock(void) {
toku_pthread_rwlock_wrlock(&multi_operation_lock);
locked_mo = TRUE;
locked_mo = true;
}
static void
multi_operation_checkpoint_unlock(void) {
locked_mo = FALSE;
locked_mo = false;
toku_pthread_rwlock_wrunlock(&multi_operation_lock);
}
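Note: the ordering above is deliberate. locked_mo is set only after the write lock is acquired and cleared before it is released, so the flag is true only while checkpoint holds the lock; per the declarations earlier it exists as a sanity check. A minimal sketch of the same pattern with plain pthreads and hypothetical names:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
static volatile bool locked = false;   // true only while the write lock is held

static void checkpoint_lock(void)   { pthread_rwlock_wrlock(&lk); locked = true;  }
static void checkpoint_unlock(void) { locked = false; pthread_rwlock_unlock(&lk); }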
static void
checkpoint_safe_lock_init(void) {
toku_pthread_rwlock_init(&checkpoint_safe_lock, NULL);
locked_cs = FALSE;
locked_cs = false;
}
static void
......@@ -165,12 +165,12 @@ checkpoint_safe_lock_destroy(void) {
static void
checkpoint_safe_checkpoint_lock(void) {
toku_pthread_rwlock_wrlock(&checkpoint_safe_lock);
locked_cs = TRUE;
locked_cs = true;
}
static void
checkpoint_safe_checkpoint_unlock(void) {
locked_cs = FALSE;
locked_cs = false;
toku_pthread_rwlock_wrunlock(&checkpoint_safe_lock);
}
......@@ -211,14 +211,14 @@ void
toku_checkpoint_init(void) {
multi_operation_lock_init();
checkpoint_safe_lock_init();
initialized = TRUE;
initialized = true;
}
void
toku_checkpoint_destroy(void) {
multi_operation_lock_destroy();
checkpoint_safe_lock_destroy();
initialized = FALSE;
initialized = false;
}
#define SET_CHECKPOINT_FOOTPRINT(x) STATUS_VALUE(CP_FOOTPRINT) = footprint_offset + x
......
......@@ -8,12 +8,12 @@
#ident "$Id$"
int toku_set_checkpoint_period(CACHETABLE ct, u_int32_t new_period);
int toku_set_checkpoint_period(CACHETABLE ct, uint32_t new_period);
//Effect: Change [end checkpoint (n) - begin checkpoint (n+1)] delay to
// new_period seconds. 0 means disable.
u_int32_t toku_get_checkpoint_period(CACHETABLE ct);
u_int32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
uint32_t toku_get_checkpoint_period(CACHETABLE ct);
uint32_t toku_get_checkpoint_period_unlocked(CACHETABLE ct);
/******
......@@ -109,7 +109,7 @@ typedef enum {
} cp_status_entry;
typedef struct {
BOOL initialized;
bool initialized;
TOKU_ENGINE_STATUS_ROW_S status[CP_STATUS_NUM_ROWS];
} CHECKPOINT_STATUS_S, *CHECKPOINT_STATUS;
......
......@@ -38,7 +38,7 @@ measure_header (int fd, toku_off_t off, // read header from this offset
r=pread(fd, fbuf, 12, off);
assert(r==12);
assert(memcmp(fbuf,"tokudata",8)==0);
int bsize = toku_dtoh32(*(u_int32_t*)(fbuf+8));
int bsize = toku_dtoh32(*(uint32_t*)(fbuf+8));
//printf("Bsize=%d\n", bsize);
(*usize)+=bsize;
assert(bsize<=NSIZE);
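Note: the cast *(uint32_t*)(fbuf+8) assumes fbuf+8 is suitably aligned for a 4-byte load. A minimal sketch of a memcpy-based read without that assumption, as a hypothetical helper rather than anything in this commit:

#include <stdint.h>
#include <string.h>

static uint32_t read_u32(const char *buf, size_t off) {
    uint32_t v;
    memcpy(&v, buf + off, sizeof v);   // well-defined for any alignment
    return v;                          // caller still applies toku_dtoh32()
}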
......@@ -64,8 +64,8 @@ measure_node (int fd, toku_off_t off, // read header from this offset
assert(r==24);
//printf("fbuf[0..7]=%c%c%c%c%c%c%c%c\n", fbuf[0], fbuf[1], fbuf[2], fbuf[3], fbuf[4], fbuf[5], fbuf[6], fbuf[7]);
assert(memcmp(fbuf,"tokuleaf",8)==0 || memcmp(fbuf, "tokunode", 8)==0);
assert(8==toku_dtoh32(*(u_int32_t*)(fbuf+8))); // check file version
int bsize = toku_dtoh32(*(u_int32_t*)(fbuf+20));
assert(8==toku_dtoh32(*(uint32_t*)(fbuf+8))); // check file version
int bsize = toku_dtoh32(*(uint32_t*)(fbuf+20));
//printf("Bsize=%d\n", bsize);
(*usize)+=bsize;
......
......@@ -22,14 +22,14 @@ struct dbufio_file {
// need the mutex to modify these
struct dbufio_file *next;
BOOL second_buf_ready; // if true, the i/o thread is not touching anything.
bool second_buf_ready; // if true, the i/o thread is not touching anything.
// consumers own [0], i/o thread owns [1], they are swapped by the consumer only when the condition mutex is held and second_buf_ready is true.
char *buf[2];
size_t n_in_buf[2];
int error_code[2]; // includes errno or eof. [0] is the error code associated with buf[0], [1] is the code for buf[1]
BOOL io_done;
bool io_done;
};
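Note: a minimal sketch of the consumer-side swap the comment above describes, with simplified types and plain pthreads (the real struct also carries error codes, offsets, and the fd):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct dbuf {
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
    bool   second_buf_ready;    // set by the i/o thread when buf[1] is filled
    char  *buf[2];              // [0] consumer-owned, [1] i/o-thread-owned
    size_t n_in_buf[2];
};

static void consumer_swap(struct dbuf *f) {
    pthread_mutex_lock(&f->mutex);
    while (!f->second_buf_ready)                // wait for the i/o thread
        pthread_cond_wait(&f->cond, &f->mutex);
    char *tmp = f->buf[0];                      // swap buffer ownership
    f->buf[0] = f->buf[1];
    f->buf[1] = tmp;
    f->n_in_buf[0] = f->n_in_buf[1];
    f->second_buf_ready = false;                // buf[1] may be refilled now
    pthread_mutex_unlock(&f->mutex);
}

The dbufio_fileset_read hunk further down performs this same swap before re-enqueueing the file for more i/o.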
......@@ -48,7 +48,7 @@ struct dbufio_fileset {
struct dbufio_file *head, *tail; // must have the mutex to fiddle with these.
size_t bufsize; // the bufsize is the constant (the same for all buffers).
BOOL panic;
bool panic;
int panic_errno;
toku_pthread_t iothread;
};
......@@ -68,11 +68,11 @@ static void panic (DBUFIO_FILESET bfs, int r) {
if (bfs->panic) return;
// may need a cilk fake mutex here to convince the race detector that it's OK.
bfs->panic_errno = r; // Don't really care about a race on this variable... Writes to it are atomic, so at least one good panic reason will be stored.
bfs->panic = TRUE;
bfs->panic = true;
return;
}
static BOOL paniced (DBUFIO_FILESET bfs) {
static bool paniced (DBUFIO_FILESET bfs) {
// may need a cilk fake mutex here to convince the race detector that it's OK.
return bfs->panic;
}
......@@ -110,7 +110,7 @@ static void* io_thread (void *v)
} else {
// Some I/O needs to be done.
//printf("%s:%d Need I/O\n", __FILE__, __LINE__);
assert(dbf->second_buf_ready == FALSE);
assert(dbf->second_buf_ready == false);
assert(!dbf->io_done);
bfs->head = dbf->next;
if (bfs->head==NULL) bfs->tail=NULL;
......@@ -132,7 +132,7 @@ static void* io_thread (void *v)
// End of file. Save it.
dbf->error_code[1] = EOF;
dbf->n_in_buf[1] = 0;
dbf->io_done = TRUE;
dbf->io_done = true;
} else {
dbf->error_code[1] = 0;
......@@ -152,7 +152,7 @@ static void* io_thread (void *v)
bfs->n_not_done--;
}
//printf("%s:%d n_not_done=%d\n", __FILE__, __LINE__, bfs->n_not_done);
dbf->second_buf_ready = TRUE;
dbf->second_buf_ready = true;
toku_cond_broadcast(&bfs->cond);
//printf("%s:%d did broadcast=%d\n", __FILE__, __LINE__, bfs->n_not_done);
// Still have the lock so go around the loop
......@@ -166,7 +166,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
int result = 0;
DBUFIO_FILESET MALLOC(bfs);
if (bfs==0) { result = get_error_errno(); }
BOOL mutex_inited = FALSE, cond_inited = FALSE;
bool mutex_inited = false, cond_inited = false;
if (result==0) {
MALLOC_N(N, bfs->files);
if (bfs->files==NULL) { result = get_error_errno(); }
......@@ -179,11 +179,11 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
//printf("%s:%d here\n", __FILE__, __LINE__);
if (result==0) {
toku_mutex_init(&bfs->mutex, NULL);
mutex_inited = TRUE;
mutex_inited = true;
}
if (result==0) {
toku_cond_init(&bfs->cond, NULL);
cond_inited = TRUE;
cond_inited = true;
}
if (result==0) {
bfs->N = N;
......@@ -194,7 +194,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
bfs->files[i].offset_in_buf = 0;
bfs->files[i].offset_in_file = 0;
bfs->files[i].next = NULL;
bfs->files[i].second_buf_ready = FALSE;
bfs->files[i].second_buf_ready = false;
for (int j=0; j<2; j++) {
if (result==0) {
MALLOC_N(bufsize, bfs->files[i].buf[j]);
......@@ -203,7 +203,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
bfs->files[i].n_in_buf[j] = 0;
bfs->files[i].error_code[j] = 0;
}
bfs->files[i].io_done = FALSE;
bfs->files[i].io_done = false;
{
ssize_t r = toku_os_read(bfs->files[i].fd, bfs->files[i].buf[0], bufsize);
if (r<0) {
......@@ -211,7 +211,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
break;
} else if (r==0) {
// it's EOF
bfs->files[i].io_done = TRUE;
bfs->files[i].io_done = true;
bfs->n_not_done--;
bfs->files[i].error_code[0] = EOF;
} else {
......@@ -222,7 +222,7 @@ int create_dbufio_fileset (DBUFIO_FILESET *bfsp, int N, int fds[/*N*/], size_t b
}
}
bfs->bufsize = bufsize;
bfs->panic = FALSE;
bfs->panic = false;
bfs->panic_errno = 0;
}
//printf("Creating IO thread\n");
......@@ -331,7 +331,7 @@ int dbufio_fileset_read (DBUFIO_FILESET bfs, int filenum, void *buf_v, size_t co
dbf->buf[1] = tmp;
}
dbf->error_code[0] = dbf->error_code[1];
dbf->second_buf_ready = FALSE;
dbf->second_buf_ready = false;
dbf->offset_in_buf = 0;
if (!dbf->io_done) {
// Don't enqueue it if the I/O is all done.
......
......@@ -153,7 +153,7 @@ void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo) {
*cloned_fifo = new_fifo;
}
BOOL toku_are_fifos_same(FIFO fifo1, FIFO fifo2) {
bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2) {
return (
fifo1->memory_used == fifo2->memory_used &&
memcmp(fifo1->memory, fifo2->memory, fifo1->memory_used) == 0
......
......@@ -63,7 +63,7 @@ unsigned long toku_fifo_memory_size_in_use(FIFO fifo); // return how much memor
unsigned long toku_fifo_memory_footprint(FIFO fifo); // return how much memory the fifo occupies
//These two are problematic, since I don't want to malloc() the bytevecs, but dequeueing the fifo frees the memory.
//int toku_fifo_peek_deq (FIFO, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen, u_int32_t *type, TXNID *xid);
//int toku_fifo_peek_deq (FIFO, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen, uint32_t *type, TXNID *xid);
//int toku_fifo_peek_deq_cmdstruct (FIFO, FT_MSG, DBT*, DBT*); // fill in the FT_MSG, using the two DBTs for the DBT part.
void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, void*), void*);
......@@ -78,7 +78,7 @@ void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,IT
MSN msnvar = e->msn; \
XIDS xidsvar = &e->xids_s; \
bytevec keyvar = xids_get_end_of_array(xidsvar); \
bytevec datavar = (const u_int8_t*)keyvar + e->keylen; \
bytevec datavar = (const uint8_t*)keyvar + e->keylen; \
bool is_freshvar = e->is_fresh; \
body; \
} })
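Note: the macro above relies on the fifo_entry layout: a variable-length XIDS array at the end of the struct, followed immediately by keylen key bytes, then the value bytes. A minimal sketch of that kind of layout arithmetic with a simplified entry type (hypothetical, not the real fifo_entry):

#include <stdint.h>

struct entry {
    uint32_t keylen;
    uint32_t vallen;
    uint8_t  payload[];   // key bytes followed immediately by value bytes
};

static const uint8_t *entry_key(const struct entry *e) { return e->payload; }
static const uint8_t *entry_val(const struct entry *e) { return e->payload + e->keylen; }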
......@@ -94,7 +94,7 @@ const struct fifo_entry *toku_fifo_get_entry(FIFO fifo, long off);
void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo);
BOOL toku_are_fifos_same(FIFO fifo1, FIFO fifo2);
bool toku_are_fifos_same(FIFO fifo1, FIFO fifo2);
......
......@@ -13,7 +13,7 @@
static void
ftnode_get_key_and_fullhash(
BLOCKNUM* cachekey,
u_int32_t* fullhash,
uint32_t* fullhash,
void* extra)
{
FT h = (FT) extra;
......@@ -26,18 +26,18 @@ ftnode_get_key_and_fullhash(
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
u_int32_t num_dependent_nodes,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
u_int32_t* fullhash, //output
uint32_t* fullhash, //output
FTNODE* result)
{
FTNODE XMALLOC(new_node);
CACHEFILE dependent_cf[num_dependent_nodes];
BLOCKNUM dependent_keys[num_dependent_nodes];
u_int32_t dependent_fullhash[num_dependent_nodes];
uint32_t dependent_fullhash[num_dependent_nodes];
enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
for (u_int32_t i = 0; i < num_dependent_nodes; i++) {
for (uint32_t i = 0; i < num_dependent_nodes; i++) {
dependent_cf[i] = h->cf;
dependent_keys[i] = dependent_nodes[i]->thisnodename;
dependent_fullhash[i] = toku_cachetable_hash(h->cf, dependent_nodes[i]->thisnodename);
......@@ -69,10 +69,10 @@ create_new_ftnode_with_dep_nodes(
FTNODE *result,
int height,
int n_children,
u_int32_t num_dependent_nodes,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes)
{
u_int32_t fullhash = 0;
uint32_t fullhash = 0;
BLOCKNUM name;
cachetable_put_empty_node_with_dep_nodes(
......@@ -122,18 +122,18 @@ int
toku_pin_ftnode(
FT_HANDLE brt,
BLOCKNUM blocknum,
u_int32_t fullhash,
uint32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors,
const PIVOT_BOUNDS bounds,
FTNODE_FETCH_EXTRA bfe,
BOOL may_modify_node,
BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
bool may_modify_node,
bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
BOOL* msgs_applied)
bool* msgs_applied)
{
void *node_v;
*msgs_applied = FALSE;
*msgs_applied = false;
int r = toku_cachetable_get_and_pin_nonblocking(
brt->ft->cf,
blocknum,
......@@ -165,19 +165,19 @@ void
toku_pin_ftnode_off_client_thread(
FT h,
BLOCKNUM blocknum,
u_int32_t fullhash,
uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
BOOL may_modify_node,
u_int32_t num_dependent_nodes,
bool may_modify_node,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
FTNODE *node_p)
{
void *node_v;
CACHEFILE dependent_cf[num_dependent_nodes];
BLOCKNUM dependent_keys[num_dependent_nodes];
u_int32_t dependent_fullhash[num_dependent_nodes];
uint32_t dependent_fullhash[num_dependent_nodes];
enum cachetable_dirty dependent_dirty_bits[num_dependent_nodes];
for (u_int32_t i = 0; i < num_dependent_nodes; i++) {
for (uint32_t i = 0; i < num_dependent_nodes; i++) {
dependent_cf[i] = h->cf;
dependent_keys[i] = dependent_nodes[i]->thisnodename;
dependent_fullhash[i] = toku_cachetable_hash(h->cf, dependent_nodes[i]->thisnodename);
......
......@@ -18,10 +18,10 @@
void
cachetable_put_empty_node_with_dep_nodes(
FT h,
u_int32_t num_dependent_nodes,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
BLOCKNUM* name, //output
u_int32_t* fullhash, //output
uint32_t* fullhash, //output
FTNODE* result
);
......@@ -36,7 +36,7 @@ create_new_ftnode_with_dep_nodes(
FTNODE *result,
int height,
int n_children,
u_int32_t num_dependent_nodes,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
......@@ -64,15 +64,15 @@ int
toku_pin_ftnode(
FT_HANDLE brt,
BLOCKNUM blocknum,
u_int32_t fullhash,
uint32_t fullhash,
UNLOCKERS unlockers,
ANCESTORS ancestors,
const PIVOT_BOUNDS pbounds,
FTNODE_FETCH_EXTRA bfe,
BOOL may_modify_node,
BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
bool may_modify_node,
bool apply_ancestor_messages, // this bool is probably temporary, for #3972, once we know how range query estimates work, will revisit this
FTNODE *node_p,
BOOL* msgs_applied
bool* msgs_applied
);
/**
......@@ -86,10 +86,10 @@ void
toku_pin_ftnode_off_client_thread(
FT h,
BLOCKNUM blocknum,
u_int32_t fullhash,
uint32_t fullhash,
FTNODE_FETCH_EXTRA bfe,
BOOL may_modify_node,
u_int32_t num_dependent_nodes,
bool may_modify_node,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes,
FTNODE *node_p
);
......
......@@ -88,8 +88,8 @@ ftleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
BOOL create_new_node,
u_int32_t num_dependent_nodes,
bool create_new_node,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
......@@ -109,7 +109,7 @@ ft_nonleaf_split(
FTNODE *nodea,
FTNODE *nodeb,
DBT *splitk,
u_int32_t num_dependent_nodes,
uint32_t num_dependent_nodes,
FTNODE* dependent_nodes
);
......
......@@ -75,7 +75,7 @@ hot_set_highest_key(struct hot_flusher_extra *flusher)
// Otherwise, let's copy all the contents from one key to the other.
void *source = flusher->max_current_key.data;
void *destination = flusher->highest_pivot_key.data;
u_int32_t size = flusher->max_current_key.size;
uint32_t size = flusher->max_current_key.size;
destination = toku_xrealloc(destination, size);
memcpy(destination, source, size);
......@@ -95,7 +95,7 @@ hot_set_key(DBT *key, FTNODE parent, int childnum)
DBT *pivot = &parent->childkeys[childnum];
void *data = key->data;
u_int32_t size = pivot->size;
uint32_t size = pivot->size;
data = toku_xrealloc(data, size);
memcpy(data, pivot->data, size);
......@@ -263,7 +263,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
do {
FTNODE root;
CACHEKEY root_key;
u_int32_t fullhash;
uint32_t fullhash;
{
toku_ft_grab_treelock(brt->ft);
......@@ -277,7 +277,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
(BLOCKNUM) root_key,
fullhash,
&bfe,
TRUE,
true,
0,
NULL,
&root);
......@@ -349,7 +349,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
// More diagnostics.
{
BOOL success = false;
bool success = false;
if (r == 0) { success = true; }
{
......
......@@ -108,8 +108,8 @@ check_node_info_checksum(struct rbuf *rb)
{
int r = 0;
// Verify checksum of header stored.
u_int32_t checksum = x1764_memory(rb->buf, rb->ndone);
u_int32_t stored_checksum = rbuf_int(rb);
uint32_t checksum = x1764_memory(rb->buf, rb->ndone);
uint32_t stored_checksum = rbuf_int(rb);
if (stored_checksum != checksum) {
// TODO: dump_bad_block(rb->buf, rb->size);
......@@ -143,8 +143,8 @@ int
check_legacy_end_checksum(struct rbuf *rb)
{
int r = 0;
u_int32_t expected_xsum = rbuf_int(rb);
u_int32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
uint32_t expected_xsum = rbuf_int(rb);
uint32_t actual_xsum = x1764_memory(rb->buf, rb->size - 4);
if (expected_xsum != actual_xsum) {
r = TOKUDB_BAD_CHECKSUM;
}
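Note: check_legacy_end_checksum above uses a common trailing-checksum shape: the last four bytes of the block store a checksum of everything before them. A minimal sketch, with an assumed x1764_memory signature and with byte order handled by rbuf_int in the real code:

#include <stdint.h>
#include <string.h>

extern uint32_t x1764_memory(const void *buf, size_t len);  // assumed signature

static int verify_trailing_checksum(const char *buf, size_t size) {
    uint32_t stored;
    memcpy(&stored, buf + size - 4, sizeof stored);   // last 4 bytes: stored checksum
    uint32_t computed = x1764_memory(buf, size - 4);  // checksum of the rest
    return stored == computed ? 0 : -1;               // nonzero, like TOKUDB_BAD_CHECKSUM
}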
......
......@@ -50,7 +50,7 @@ typedef struct ft_search {
// There also remains a potential thrashing problem. When we get a TOKUDB_TRY_AGAIN, we unpin everything. There's
// no guarantee that we will get everything pinned again. We ought to keep nodes pinned when we retry, except that on the
// way out with a DB_NOTFOUND we ought to unpin those nodes. See #3528.
BOOL have_pivot_bound;
bool have_pivot_bound;
DBT pivot_bound;
} ft_search_t;
......@@ -60,7 +60,7 @@ static inline ft_search_t *ft_search_init(ft_search_t *so, ft_search_compare_fun
so->direction = direction;
so->k = k;
so->context = context;
so->have_pivot_bound = FALSE;
so->have_pivot_bound = false;
return so;
}
......
......@@ -32,7 +32,7 @@ next_dummymsn(void) {
}
BOOL ignore_if_was_already_open;
bool ignore_if_was_already_open;
int toku_testsetup_leaf(FT_HANDLE brt, BLOCKNUM *blocknum, int n_children, char **keys, int *keylens) {
FTNODE node;
assert(testsetup_initialized);
......@@ -93,7 +93,7 @@ int toku_testsetup_get_sersize(FT_HANDLE brt, BLOCKNUM diskoff) // Return the si
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
TRUE,
true,
&bfe
);
assert(r==0);
......@@ -121,7 +121,7 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
TRUE,
true,
&bfe
);
if (r!=0) return r;
......@@ -169,7 +169,7 @@ toku_pin_node_with_min_bfe(FTNODE* node, BLOCKNUM b, FT_HANDLE t)
b,
toku_cachetable_hash(t->ft->cf, b),
&bfe,
TRUE,
true,
0,
NULL,
node
......@@ -194,7 +194,7 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM blocknum, enum ft_
toku_ftnode_fetch_callback,
toku_ftnode_pf_req_callback,
toku_ftnode_pf_callback,
TRUE,
true,
&bfe
);
if (r!=0) return r;
......
......@@ -10,15 +10,15 @@
#include "ft_msg.h"
u_int32_t
uint32_t
ft_msg_get_keylen(FT_MSG ft_msg) {
u_int32_t rval = ft_msg->u.id.key->size;
uint32_t rval = ft_msg->u.id.key->size;
return rval;
}
u_int32_t
uint32_t
ft_msg_get_vallen(FT_MSG ft_msg) {
u_int32_t rval = ft_msg->u.id.val->size;
uint32_t rval = ft_msg->u.id.val->size;
return rval;
}
......
......@@ -74,7 +74,7 @@ int ft_loader_call_error_function(ft_loader_error_callback loader_error) {
error_callback_lock(loader_error);
r = loader_error->error;
if (r && loader_error->error_callback && !loader_error->did_callback) {
loader_error->did_callback = TRUE;
loader_error->did_callback = true;
loader_error->error_callback(loader_error->db,
loader_error->which_db,
loader_error->error,
......