Commit 52b0848d authored by Barry Perlman, committed by Yoni Fogel

Addresses #1963 refs[t:1963] (expected to be) minor optimizations to maybe_get_and_pin

git-svn-id: file:///svn/toku/tokudb@14131 c7de825b-a66e-492c-adef-691d508d4ae1
parent 2cda63bc
@@ -1147,20 +1147,20 @@ int toku_cachetable_maybe_get_and_pin (CACHEFILE cachefile, CACHEKEY key, u_int3
     cachetable_lock(ct);
     for (p=ct->table[fullhash&(ct->table_size-1)]; p; p=p->hash_chain) {
         count++;
-        if (p->key.b==key.b && p->cachefile==cachefile && p->state == CTPAIR_IDLE) {
-            if (p->checkpoint_pending || !p->dirty) {
-                goto finish;
-            }
-            *value = p->value;
-            rwlock_read_lock(&p->rwlock, ct->mutex);
-            lru_touch(ct,p);
-            r = 0;
-            //printf("%s:%d cachetable_maybe_get_and_pin(%lld)--> %p\n", __FILE__, __LINE__, key, *value);
+        if (p->key.b==key.b && p->cachefile==cachefile) {
+            if (p->state == CTPAIR_IDLE &&                               //If not idle, will require a stall and/or will be clean once it is idle
+                !p->checkpoint_pending &&                                //If checkpoint pending, we would need to first write it, which would make it clean
+                p->dirty &&
+                rwlock_try_prefer_read_lock(&p->rwlock, ct->mutex) == 0  //Grab read lock. If any stall would be necessary that means it would be clean AFTER the stall, so don't even try to stall
+               ) {
+                *value = p->value;
+                lru_touch(ct,p);
+                r = 0;
+                //printf("%s:%d cachetable_maybe_get_and_pin(%lld)--> %p\n", __FILE__, __LINE__, key, *value);
+            }
             break;
         }
     }
-finish:
     note_hash_count(count);
     cachetable_unlock(ct);
     return r;
...
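Not part of the commit: a hedged sketch of the caller-side pattern this fast path serves. The tail of toku_cachetable_maybe_get_and_pin's parameter list (u_int32_t fullhash, void **value), which the hunk header truncates, is inferred from the body above; work_on_dirty_node() and take_slow_path() are hypothetical placeholders, not TokuDB functions.

// Sketch only: try the non-blocking pin first, fall back when it refuses.
static void example_caller(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash) {
    void *node = NULL;
    if (toku_cachetable_maybe_get_and_pin(cf, key, fullhash, &node) == 0) {
        // The pair was resident, idle, dirty, not checkpoint-pending, and its
        // read lock was taken without any stall, so it is now pinned.
        work_on_dirty_node(node);
        // ...the caller must still unpin the pair through the usual path (not shown)...
    } else {
        // Missing, clean, checkpoint-pending, busy, or write-locked:
        // whatever slower path the caller normally takes goes here.
        take_slow_path(cf, key, fullhash);
    }
}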
@@ -82,6 +82,20 @@ static inline void rwlock_prefer_read_lock(RWLOCK rwlock, toku_pthread_mutex_t *
     rwlock_read_lock(rwlock, mutex);
 }
+
+// try to acquire a read lock preferentially (ignore request for write lock).
+// If a stall would happen (write lock is held), instead return EBUSY immediately.
+// expects: mutex is locked
+static inline int rwlock_try_prefer_read_lock(RWLOCK rwlock, toku_pthread_mutex_t *UU(mutex)) {
+    int r = EBUSY;
+    if (!rwlock->writer) {
+        rwlock->reader++;
+        r = 0;
+    }
+    return r;
+}
+
 // release a read lock
 // expects: mutex is locked
...
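Also not part of the commit: a hedged sketch of how the new helper composes with the existing blocking rwlock_prefer_read_lock shown in the hunk context above. Both calls expect the mutex guarding the rwlock's counters to be held, per the "expects: mutex is locked" comments; the wrapper itself and its may_stall flag are illustrative only.

// Sketch only: prefer the no-stall path, optionally falling back to a blocking wait.
static int read_lock_or_give_up(RWLOCK rwlock, toku_pthread_mutex_t *mutex, int may_stall) {
    int r = rwlock_try_prefer_read_lock(rwlock, mutex);
    if (r == 0) return 0;                    // got the read lock with no chance of blocking
    if (!may_stall) return r;                // EBUSY (from <errno.h>): skip the work, as maybe_get_and_pin does
    rwlock_prefer_read_lock(rwlock, mutex);  // caller accepts a stall: wait out the writer
    return 0;
}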