Commit d8601e19 authored by Leif Walsh, committed by Yoni Fogel

[t:5069] move toku_mutex_t's locked member to only exist when compiling with TOKU_PTHREAD_DEBUG, for performance reasons

[t:5069] move toku_mutex_t's locked member to only exist when compiling with TOKU_PTHREAD_DEBUG, for performance reasons

git-svn-id: file:///svn/toku/tokudb@44506 c7de825b-a66e-492c-adef-691d508d4ae1
parent 5ecfb1c8
......@@ -88,7 +88,7 @@ static inline void unlock_for_blocktable (BLOCK_TABLE bt);
static void
ft_set_dirty(FT ft, BOOL for_checkpoint){
assert(toku_mutex_is_locked(&ft->blocktable->mutex));
toku_mutex_assert_locked(&ft->blocktable->mutex);
assert(ft->h->type == FT_CURRENT);
if (for_checkpoint) {
assert(ft->checkpoint_header->type == FT_CHECKPOINT_INPROGRESS);
......@@ -101,7 +101,7 @@ ft_set_dirty(FT ft, BOOL for_checkpoint){
static void
maybe_truncate_file(BLOCK_TABLE bt, int fd, u_int64_t size_needed_before) {
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
u_int64_t new_size_needed = block_allocator_allocated_limit(bt->block_allocator);
//Save a call to toku_os_get_file_size (kernel call) if unlikely to be useful.
if (new_size_needed < size_needed_before && new_size_needed < bt->safe_file_size) {
......@@ -210,7 +210,7 @@ maybe_optimize_translation(struct translation *t) {
// block table must be locked by caller of this function
void
toku_block_translation_note_start_checkpoint_unlocked (BLOCK_TABLE bt) {
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
// Copy current translation to inprogress translation.
assert(bt->inprogress.block_translation == NULL);
//We're going to do O(n) work to copy the translation, so we
......@@ -359,7 +359,7 @@ toku_ft_lock (FT ft) {
// Release the block-table lock protecting this FT's blocktable.
// NOTE(review): this span is a rendered diff; the two assert lines below are
// the removed/added pair of a single change, not two intended statements.
void
toku_ft_unlock (FT ft) {
BLOCK_TABLE bt = ft->blocktable;
assert(toku_mutex_is_locked(&bt->mutex)); // old form (removed by this commit)
toku_mutex_assert_locked(&bt->mutex); // new form: no-op unless TOKU_PTHREAD_DEBUG
unlock_for_blocktable(bt);
}
......@@ -403,7 +403,7 @@ translation_prevents_freeing(struct translation *t, BLOCKNUM b, struct block_tra
static void
blocknum_realloc_on_disk_internal (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, BOOL for_checkpoint) {
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
ft_set_dirty(ft, for_checkpoint);
struct translation *t = &bt->current;
......@@ -467,7 +467,7 @@ toku_blocknum_realloc_on_disk (BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF
// Purpose of this function is to figure out where to put the inprogress btt on disk, allocate space for it there.
static void
blocknum_alloc_translation_on_disk_unlocked (BLOCK_TABLE bt) {
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
struct translation *t = &bt->inprogress;
assert(t->block_translation);
......@@ -560,7 +560,7 @@ maybe_expand_translation (struct translation *t) {
void
toku_allocate_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *res, FT ft) {
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
BLOCKNUM result;
struct translation * t = &bt->current;
if (t->blocknum_freelist_head.b == freelist_null.b) {
......@@ -608,7 +608,7 @@ static void
free_blocknum_unlocked(BLOCK_TABLE bt, BLOCKNUM *bp, FT ft, BOOL for_checkpoint) {
// Effect: Free a blocknum.
// If the blocknum holds the only reference to a block on disk, free that block
assert(toku_mutex_is_locked(&bt->mutex));
toku_mutex_assert_locked(&bt->mutex);
BLOCKNUM b = *bp;
bp->b = 0; //Remove caller's reference.
......@@ -944,7 +944,7 @@ toku_blocktable_internal_fragmentation (BLOCK_TABLE bt, int64_t *total_sizep, in
// Reallocate on-disk space for the descriptor block (caller must already hold
// the block-table mutex, hence "_unlocked" in the name).
// NOTE(review): rendered diff; the two assert lines are the removed/added pair.
void
toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT ft) {
assert(toku_mutex_is_locked(&bt->mutex)); // old form (removed by this commit)
toku_mutex_assert_locked(&bt->mutex); // new form: compiled out without TOKU_PTHREAD_DEBUG
BLOCKNUM b = make_blocknum(RESERVED_BLOCKNUM_DESCRIPTOR);
blocknum_realloc_on_disk_internal(bt, b, size, offset, ft, FALSE);
}
......
......@@ -32,8 +32,8 @@ typedef struct timespec toku_timespec_t;
// Wrapper around pthread_mutex_t. This commit moves the `locked` bookkeeping
// member under TOKU_PTHREAD_DEBUG so release builds pay no cost for it.
// NOTE(review): rendered diff — the first `bool locked;` is the removed
// (unconditional) member; the one inside the #if is its replacement. As flat
// code this would redeclare `locked` when TOKU_PTHREAD_DEBUG is set.
typedef struct toku_mutex {
pthread_mutex_t pmutex;
bool locked;
#if TOKU_PTHREAD_DEBUG
bool locked;
pthread_t owner; // = pthread_self(); // for debugging
#endif
} toku_mutex_t;
......@@ -42,12 +42,16 @@ static inline void
// Initialize the wrapped pthread mutex; aborts (assert_zero) on failure.
// The debug-only `locked` flag starts false.
toku_mutex_init(toku_mutex_t *mutex, const toku_pthread_mutexattr_t *attr) {
int r = pthread_mutex_init(&mutex->pmutex, attr);
assert_zero(r);
#if TOKU_PTHREAD_DEBUG
mutex->locked = false; // debug bookkeeping only; absent in release builds
#endif
}
// Destroy the mutex; it must not be held.
// NOTE(review): rendered diff — the unconditional assert is the removed line,
// the #if-guarded invariant is its replacement.
static inline void
toku_mutex_destroy(toku_mutex_t *mutex) {
assert(!mutex->locked); // old form (removed by this commit)
#if TOKU_PTHREAD_DEBUG
invariant(!mutex->locked); // new form: checked only in debug builds
#endif
int r = pthread_mutex_destroy(&mutex->pmutex);
assert_zero(r);
}
......@@ -56,28 +60,34 @@ static inline void
// Acquire the mutex; in debug builds record that it is held and by whom.
// NOTE(review): rendered diff — the bare assert/`locked = true` pair is the
// removed code; the #if block is its replacement.
toku_mutex_lock(toku_mutex_t *mutex) {
int r = pthread_mutex_lock(&mutex->pmutex);
assert_zero(r);
assert(!mutex->locked); // old form (removed by this commit)
mutex->locked = true; // old form (removed by this commit)
#if TOKU_PTHREAD_DEBUG
invariant(!mutex->locked); // would indicate bookkeeping corruption
mutex->locked = true;
mutex->owner = pthread_self(); // remember holder for debugging
#endif
}
// Release the mutex; in debug builds clear the held-state bookkeeping first
// (while still holding the lock, so the fields are race-free).
// NOTE(review): rendered diff — unguarded pair is removed code, #if block is new.
static inline void
toku_mutex_unlock(toku_mutex_t *mutex) {
assert(mutex->locked); // old form (removed by this commit)
mutex->locked = false; // old form (removed by this commit)
#if TOKU_PTHREAD_DEBUG
invariant(mutex->locked); // must be held by definition of unlock
mutex->locked = false;
mutex->owner = 0;
#endif
int r = pthread_mutex_unlock(&mutex->pmutex);
assert_zero(r);
}
// NOTE(review): heavily diff-garbled span. The old predicate
// toku_mutex_is_locked() (first three lines, its closing brace consumed by the
// diff) is replaced by toku_mutex_assert_locked(): a debug-build invariant
// check, and an empty inline no-op when TOKU_PTHREAD_DEBUG is off — so release
// builds need no `locked` member at all.
static inline bool
toku_mutex_is_locked(toku_mutex_t *mutex) {
return mutex->locked;
#if TOKU_PTHREAD_DEBUG
static inline void
toku_mutex_assert_locked(toku_mutex_t *mutex) {
invariant(mutex->locked);
}
#else
static inline void
toku_mutex_assert_locked(toku_mutex_t *mutex __attribute__((unused))) {
}
#endif
typedef struct toku_cond {
pthread_cond_t pcond;
......@@ -97,31 +107,31 @@ toku_cond_destroy(toku_cond_t *cond) {
// Wait on `cond` with `mutex` held. pthread_cond_wait releases the mutex while
// blocked and re-acquires it before returning, so the debug bookkeeping is
// cleared before the wait and restored after.
// NOTE(review): rendered diff — each unguarded assert/assignment pair is the
// removed code; the adjacent #if block is its replacement.
static inline void
toku_cond_wait(toku_cond_t *cond, toku_mutex_t *mutex) {
assert(mutex->locked); // old form (removed by this commit)
mutex->locked = false; // old form (removed by this commit)
#if TOKU_PTHREAD_DEBUG
invariant(mutex->locked); // caller must hold the mutex
mutex->locked = false; // about to be released inside pthread_cond_wait
mutex->owner = 0;
#endif
int r = pthread_cond_wait(&cond->pcond, &mutex->pmutex);
assert_zero(r);
assert(!mutex->locked); // old form (removed by this commit)
mutex->locked = true; // old form (removed by this commit)
#if TOKU_PTHREAD_DEBUG
invariant(!mutex->locked); // mutex re-acquired by pthread_cond_wait
mutex->locked = true;
mutex->owner = pthread_self();
#endif
}
static inline int
toku_cond_timedwait(toku_cond_t *cond, toku_mutex_t *mutex, toku_timespec_t *wakeup_at) {
assert(mutex->locked);
mutex->locked = false;
#if TOKU_PTHREAD_DEBUG
invariant(mutex->locked);
mutex->locked = false;
mutex->owner = 0;
#endif
int r = pthread_cond_timedwait(&cond->pcond, &mutex->pmutex, wakeup_at);
assert(!mutex->locked);
mutex->locked = true;
#if TOKU_PTHREAD_DEBUG
invariant(!mutex->locked);
mutex->locked = true;
mutex->owner = pthread_self();
#endif
return r;
......
......@@ -1464,7 +1464,8 @@ cleanup:
static int
lt_try_acquire_range_read_lock(toku_lock_tree* tree, TXNID txn, const DBT* key_left, const DBT* key_right) {
assert(tree && toku_mutex_is_locked(&tree->mutex)); // locked by this thread
assert(tree);
toku_mutex_assert_locked(&tree->mutex); // locked by this thread
int r;
toku_point left;
......@@ -1713,7 +1714,8 @@ cleanup:
// run escalation algorithm on a given locktree
static int
lt_do_escalation(toku_lock_tree* lt) {
assert(lt && toku_mutex_is_locked(&lt->mutex));
assert(lt);
toku_mutex_assert_locked(&lt->mutex);
int r = ENOSYS;
......@@ -1811,7 +1813,7 @@ toku_lt_acquire_read_lock(toku_lock_tree* tree, TXNID txn, const DBT* key) {
static int
lt_try_acquire_range_write_lock(toku_lock_tree* tree, TXNID txn, const DBT* key_left, const DBT* key_right) {
assert(toku_mutex_is_locked(&tree->mutex));
toku_mutex_assert_locked(&tree->mutex);
int r;
toku_point left;
......@@ -2052,7 +2054,8 @@ lt_border_delete(toku_lock_tree* tree, toku_range_tree* rt) {
static inline int
lt_unlock_txn(toku_lock_tree* tree, TXNID txn) {
assert(tree && toku_mutex_is_locked(&tree->mutex));
assert(tree);
toku_mutex_assert_locked(&tree->mutex);
int r;
toku_range_tree *selfwrite = toku_lt_ifexist_selfwrite(tree, txn);
......@@ -2376,7 +2379,8 @@ static void lt_check_deadlock(toku_lock_tree *tree, toku_lock_request *a_lock_re
static int
lock_request_start(toku_lock_request *lock_request, toku_lock_tree *tree, bool copy_keys_if_not_granted, bool do_escalation) {
assert(lock_request->state == LOCK_REQUEST_INIT);
assert(tree && toku_mutex_is_locked(&tree->mutex));
assert(tree);
toku_mutex_assert_locked(&tree->mutex);
int r = 0;
switch (lock_request->type) {
case LOCK_REQUEST_READ:
......@@ -2449,7 +2453,8 @@ toku_lt_acquire_lock_request_with_default_timeout(toku_lock_tree *tree, toku_loc
static void
lt_retry_lock_requests(toku_lock_tree *tree) {
assert(tree && toku_mutex_is_locked(&tree->mutex));
assert(tree);
toku_mutex_assert_locked(&tree->mutex);
for (uint32_t i = 0; i < toku_omt_size(tree->lock_requests); ) {
int r;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment