Commit 012fdfd2 authored by Zardosht Kasheff, committed by Yoni Fogel

closes #5272, address CR comments

git-svn-id: file:///svn/toku/tokudb@45930 c7de825b-a66e-492c-adef-691d508d4ae1
parent 23b9b3d1
@@ -2518,6 +2518,7 @@ toku_cachetable_close (CACHETABLE *ctp) {
     toku_kibbutz_destroy(ct->checkpointing_kibbutz);
     bjm_destroy(ct->checkpoint_clones_bjm);
     toku_cond_destroy(&ct->flow_control_cond);
+    toku_mutex_destroy(&ct->mutex);
     toku_free(ct->table);
     toku_free(ct->env_dir);
     toku_free(ct);
@@ -2632,14 +2633,10 @@ int toku_cachetable_unpin_and_remove (
     //
     cachetable_remove_pair(ct, p);
     if (nb_mutex_blocked_writers(&p->value_nb_mutex)>0) {
-        toku_cond_t cond;
-        toku_cond_init(&cond, NULL);
         nb_mutex_wait_for_users(
             &p->value_nb_mutex,
-            &ct->mutex,
-            &cond
+            &ct->mutex
             );
-        toku_cond_destroy(&cond);
         assert(!p->checkpoint_pending);
         assert(p->attr.cache_pressure_size == 0);
     }
@@ -8,12 +8,6 @@
 //
 // The kibbutz is another threadpool meant to do arbitrary work.
-// It is introduced in Dr. No, and as of Dr. No, the only work kibbutzim
-// do is flusher thread work. In Dr. No, we already have a threadpool for
-// the writer threads and a threadpool for serializing ftnodes. A natural
-// question is why did we introduce another threadpool in Dr. No. The short
-// answer is that this was the simplest way to get the flusher threads work
-// done.
 //
 typedef struct kibbutz *KIBBUTZ;
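The surviving comment still describes the kibbutz only abstractly, so here is a minimal, hypothetical usage sketch of a kibbutz as a general-purpose work queue. Only toku_kibbutz_destroy appears in this diff; the toku_kibbutz_create and toku_kibbutz_enq signatures below are assumptions for illustration, not part of this commit.

// Hypothetical sketch: using a kibbutz as a general-purpose work queue.
// Assumption: toku_kibbutz_create(n) builds a pool of n worker threads and
// toku_kibbutz_enq(k, f, extra) queues f(extra) to run on one of them.
static void do_some_work(void *extra) {
    int *counter = (int *) extra;
    __sync_fetch_and_add(counter, 1);   // arbitrary work, done off the caller's thread
}

static void kibbutz_example(void) {
    KIBBUTZ k = toku_kibbutz_create(4);            // assumed signature
    int counter = 0;
    toku_kibbutz_enq(k, do_some_work, &counter);   // assumed signature
    toku_kibbutz_destroy(k);   // drains queued work before freeing, as used in toku_cachetable_close above
}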
@@ -55,11 +55,10 @@ static inline void nb_mutex_unlock(NB_MUTEX nb_mutex) {
 static inline void nb_mutex_wait_for_users(
     NB_MUTEX nb_mutex,
-    toku_mutex_t *mutex,
-    toku_cond_t* cond
+    toku_mutex_t *mutex
     )
 {
-    rwlock_wait_for_users(&nb_mutex->lock, mutex, cond);
+    rwlock_wait_for_users(&nb_mutex->lock, mutex);
 }
 // returns: the number of writers who are waiting for the lock
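After this change the caller only needs the external mutex; the condition variable lives inside rwlock_wait_for_users. Below is a rough sketch of the caller-side pattern, modeled on the cachetable hunk above; the drain_pair_lock wrapper is hypothetical, while toku_mutex_lock/unlock and the nb_mutex calls are as in the surrounding code.

// Sketch (hypothetical wrapper, not from this commit): draining an nb_mutex.
// "Users" are the current holder plus any blocked waiters of the underlying rwlock.
static void drain_pair_lock(NB_MUTEX value_nb_mutex, toku_mutex_t *ct_mutex) {
    toku_mutex_lock(ct_mutex);
    if (nb_mutex_blocked_writers(value_nb_mutex) > 0) {
        // the cond used for waiting is now created and destroyed inside
        // rwlock_wait_for_users, so nothing extra is set up here
        nb_mutex_wait_for_users(value_nb_mutex, ct_mutex);
    }
    toku_mutex_unlock(ct_mutex);
}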
@@ -154,16 +154,18 @@ static inline int rwlock_writers(RWLOCK rwlock) {
 static inline void rwlock_wait_for_users(
     RWLOCK rwlock,
-    toku_mutex_t *mutex,
-    toku_cond_t* cond
+    toku_mutex_t *mutex
     )
 {
     assert(!rwlock->wait_users_go_to_zero);
+    toku_cond_t cond;
+    toku_cond_init(&cond, NULL);
     while (rwlock_users(rwlock) > 0) {
-        rwlock->wait_users_go_to_zero = cond;
-        toku_cond_wait(cond, mutex);
+        rwlock->wait_users_go_to_zero = &cond;
+        toku_cond_wait(&cond, mutex);
     }
     rwlock->wait_users_go_to_zero = NULL;
+    toku_cond_destroy(&cond);
 }
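What makes the local condition variable safe is the hand-off through wait_users_go_to_zero: the waiter publishes &cond while holding the external mutex, and the unlock path signals it once the user count drops to zero. A sketch of that signaling side is below; the reader field and the exact unlock shape are assumptions about this header, not part of the diff.

// Sketch (assumed shape of the unlock path, not part of this diff): waking
// rwlock_wait_for_users.  When the last user leaves and a waiter has published
// its condition variable, signal it; the caller holds the same external mutex.
static inline void rwlock_read_unlock_sketch(RWLOCK rwlock) {
    rwlock->reader--;                                    // assumed field name
    if (rwlock->wait_users_go_to_zero && rwlock_users(rwlock) == 0) {
        toku_cond_signal(rwlock->wait_users_go_to_zero);
    }
}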