Commit 70d1f017 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] filtered wakeups: apply to buffer_head functions

From: William Lee Irwin III <wli@holomorphy.com>

This patch implements wake-one semantics for buffer_head wakeups in a single
step.  The buffer_head being waited on is passed to the waiter's wakeup
function by the waker, and the wakeup function compares it against the
pointer stored in the waiter's on-stack structure, also checking the
readiness of the bit there.  Wake-one semantics are achieved by using
WQ_FLAG_EXCLUSIVE in the codepaths waiting to acquire the bit for mutual
exclusion.
parent 08aaf1cc
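
[Editor's note] For orientation before the diff, here is a minimal sketch of
the filtered-wakeup pattern the patch relies on. It is illustrative only:
struct obj_wait and obj_wake_function are invented names, while container_of()
and autoremove_wake_function() are the real 2.6-era waitqueue helpers. Because
bh_waitq_head() hashes many buffer_heads onto a small set of shared
wait_queue_heads, a wakeup on a queue is not necessarily for the buffer a
given task is waiting on, so each waiter's wake function filters on the key
the waker passes down:

	/* illustrative sketch, not part of the patch */
	struct obj_wait {
		void		*obj;	/* the object this task is blocked on */
		wait_queue_t	wait;	/* standard waitqueue entry */
	};

	static int obj_wake_function(wait_queue_t *wait, unsigned mode,
				     int sync, void *key)
	{
		/* recover the waiter's on-stack state from the embedded entry */
		struct obj_wait *w = container_of(wait, struct obj_wait, wait);

		if (w->obj != key)
			return 0;	/* wakeup is for another object: keep scanning */
		return autoremove_wake_function(wait, mode, sync, key);
	}

Returning 0 tells __wake_up() to keep walking the queue, so a non-matching
waiter neither runs nor consumes the single exclusive-wakeup slot that
produces the wake-one behaviour.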
fs/buffer.c
@@ -43,6 +43,26 @@ static void invalidate_bh_lrus(void);
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
+struct bh_wait_queue {
+	struct buffer_head *bh;
+	wait_queue_t wait;
+};
+
+#define __DEFINE_BH_WAIT(name, b, f)					\
+	struct bh_wait_queue name = {					\
+		.bh	= b,						\
+		.wait	= {						\
+			.task		= current,			\
+			.flags		= f,				\
+			.func		= bh_wake_function,		\
+			.task_list	=				\
+				LIST_HEAD_INIT(name.wait.task_list),	\
+		},							\
+	}
+#define DEFINE_BH_WAIT(name, bh)	__DEFINE_BH_WAIT(name, bh, 0)
+#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
+	__DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
+
 /*
  * Hashed waitqueue_head's for wait_on_buffer()
  */
@@ -74,10 +94,50 @@ void wake_up_buffer(struct buffer_head *bh)
 	smp_mb();
 	if (waitqueue_active(wq))
-		wake_up_all(wq);
+		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
 }
 EXPORT_SYMBOL(wake_up_buffer);
 
+static int bh_wake_function(wait_queue_t *wait, unsigned mode,
+				int sync, void *key)
+{
+	struct buffer_head *bh = key;
+	struct bh_wait_queue *wq;
+
+	wq = container_of(wait, struct bh_wait_queue, wait);
+	if (wq->bh != bh || buffer_locked(bh))
+		return 0;
+	else
+		return autoremove_wake_function(wait, mode, sync, key);
+}
+
+static void sync_buffer(struct buffer_head *bh)
+{
+	struct block_device *bd;
+
+	smp_mb();
+	bd = bh->b_bdev;
+	if (bd)
+		blk_run_address_space(bd->bd_inode->i_mapping);
+}
+
+void fastcall __lock_buffer(struct buffer_head *bh)
+{
+	wait_queue_head_t *wqh = bh_waitq_head(bh);
+	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+
+	do {
+		prepare_to_wait_exclusive(wqh, &wait.wait,
+					TASK_UNINTERRUPTIBLE);
+		if (buffer_locked(bh)) {
+			sync_buffer(bh);
+			io_schedule();
+		}
+	} while (test_set_buffer_locked(bh));
+	finish_wait(wqh, &wait.wait);
+}
+EXPORT_SYMBOL(__lock_buffer);
+
 void fastcall unlock_buffer(struct buffer_head *bh)
 {
 	clear_buffer_locked(bh);
@@ -93,20 +153,16 @@ void fastcall unlock_buffer(struct buffer_head *bh)
 void __wait_on_buffer(struct buffer_head * bh)
 {
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_WAIT(wait);
+	DEFINE_BH_WAIT(wait, bh);
 
 	do {
-		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
 		if (buffer_locked(bh)) {
-			struct block_device *bd;
-			smp_mb();
-			bd = bh->b_bdev;
-			if (bd)
-				blk_run_address_space(bd->bd_inode->i_mapping);
+			sync_buffer(bh);
 			io_schedule();
 		}
 	} while (buffer_locked(bh));
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, &wait.wait);
 }
 
 static void
fs/jbd/transaction.c
@@ -633,12 +633,20 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	if (jh->b_jlist == BJ_Shadow) {
 		wait_queue_head_t *wqh;
+		DEFINE_WAIT(wait);
 
 		JBUFFER_TRACE(jh, "on shadow: sleep");
 		jbd_unlock_bh_state(bh);
 		/* commit wakes up all shadow buffers after IO */
-		wqh = bh_waitq_head(jh2bh(jh));
-		wait_event(*wqh, (jh->b_jlist != BJ_Shadow));
+		wqh = bh_waitq_head(bh);
+		for ( ; ; ) {
+			prepare_to_wait(wqh, &wait,
+					TASK_UNINTERRUPTIBLE);
+			if (jh->b_jlist != BJ_Shadow)
+				break;
+			schedule();
+		}
+		finish_wait(wqh, &wait);
 		goto repeat;
 	}
include/linux/buffer_head.h
@@ -170,6 +170,7 @@ struct buffer_head *__bread(struct block_device *, sector_t block, int size);
 struct buffer_head *alloc_buffer_head(int gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
+void FASTCALL(__lock_buffer(struct buffer_head *bh));
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 void sync_dirty_buffer(struct buffer_head *bh);
 void submit_bh(int, struct buffer_head *);
@@ -279,8 +280,8 @@ static inline void wait_on_buffer(struct buffer_head *bh)
 static inline void lock_buffer(struct buffer_head *bh)
 {
-	while (test_set_buffer_locked(bh))
-		__wait_on_buffer(bh);
+	if (test_set_buffer_locked(bh))
+		__lock_buffer(bh);
 }
 
 #endif /* _LINUX_BUFFER_HEAD_H */
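
[Editor's note] A closing note on how wake-one plays out. With the waiters
above, unlock_buffer()'s call to __wake_up(wq, ..., 1, bh) wakes every
matching non-exclusive waiter (the __wait_on_buffer() sleepers) but at most
one WQ_FLAG_EXCLUSIVE waiter (a __lock_buffer() contender), which then takes
the lock via test_set_buffer_locked(). A rough sketch of that queue walk,
paraphrasing the 2.6-era __wake_up_common() logic; wake_up_filtered and its
body are illustrative, not the patch's code:

	/* illustrative paraphrase; caller is assumed to hold q->lock */
	static void wake_up_filtered(wait_queue_head_t *q, void *key,
				     int nr_exclusive)
	{
		struct list_head *tmp, *next;

		list_for_each_safe(tmp, next, &q->task_list) {
			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
							task_list);
			/* save flags first: a successful wakeup may remove
			 * and free the entry (autoremove_wake_function) */
			unsigned flags = curr->flags;

			/* the per-waiter filter (e.g. bh_wake_function)
			 * decides whether this entry matches the key */
			if (curr->func(curr, TASK_INTERRUPTIBLE |
					TASK_UNINTERRUPTIBLE, 0, key) &&
			    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
				break;	/* one exclusive wakeup charged: stop */
		}
	}

Because __lock_buffer() queues with prepare_to_wait_exclusive(), exclusive
lock contenders sit behind the non-exclusive waiters, so readers are not
starved by the wake-one cutoff.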