Commit fd4d36bf authored by William Lee Irwin III, committed by Linus Torvalds

[PATCH] standardize bit waiting data type

Eliminate the specialized page and bh waitqueue structures in favor of a standardized one: waiters now carry a wait_bit_key identifying the word and bit they wait on, and are woken via __wake_up_bit().

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d7988992
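For orientation only (this example is not part of the patch), the sketch below shows roughly how a waiter and a waker pair up under the new interface introduced by this commit. MY_BIT, example_wait_on_bit() and example_release_bit() are hypothetical stand-ins; real callers such as the buffer and page code in the diff below pass things like &bh->b_state/BH_Lock or &page->flags/PG_locked together with their own hashed waitqueue head.

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/bitops.h>

	/* Hypothetical bit number; stands in for BH_Lock, PG_locked, etc. */
	#define MY_BIT	0

	/*
	 * Waiter: sleep until MY_BIT in *word is observed clear.  This mirrors
	 * the post-patch shape of __wait_on_buffer()/wait_on_page_bit().
	 */
	static void example_wait_on_bit(unsigned long *word, wait_queue_head_t *wqh)
	{
		DEFINE_WAIT_BIT(wait, word, MY_BIT);

		prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (test_bit(MY_BIT, word))
			io_schedule();
		finish_wait(wqh, &wait.wait);
	}

	/*
	 * Waker: clear the bit, then wake only those waiters whose
	 * wait_bit_key matches this word and bit number.
	 */
	static void example_release_bit(unsigned long *word, wait_queue_head_t *wqh)
	{
		clear_bit(MY_BIT, word);
		smp_mb__after_clear_bit();
		__wake_up_bit(wqh, word, MY_BIT);
	}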
@@ -45,26 +45,6 @@ static void invalidate_bh_lrus(void);
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
-struct bh_wait_queue {
-	struct buffer_head *bh;
-	wait_queue_t wait;
-};
-
-#define __DEFINE_BH_WAIT(name, b, f)					\
-	struct bh_wait_queue name = {					\
-		.bh	= b,						\
-		.wait	= {						\
-				.task	= current,			\
-				.flags	= f,				\
-				.func	= bh_wake_function,		\
-				.task_list =				\
-					LIST_HEAD_INIT(name.wait.task_list),\
-			},						\
-	}
-#define DEFINE_BH_WAIT(name, bh)	__DEFINE_BH_WAIT(name, bh, 0)
-#define DEFINE_BH_WAIT_EXCLUSIVE(name, bh) \
-		__DEFINE_BH_WAIT(name, bh, WQ_FLAG_EXCLUSIVE)
-
 /*
  * Hashed waitqueue_head's for wait_on_buffer()
  */
@@ -95,24 +75,10 @@ void wake_up_buffer(struct buffer_head *bh)
 	wait_queue_head_t *wq = bh_waitq_head(bh);
 
 	smp_mb();
-	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, bh);
+	__wake_up_bit(wq, &bh->b_state, BH_Lock);
 }
 EXPORT_SYMBOL(wake_up_buffer);
 
-static int bh_wake_function(wait_queue_t *wait, unsigned mode,
-				int sync, void *key)
-{
-	struct buffer_head *bh = key;
-	struct bh_wait_queue *wq;
-
-	wq = container_of(wait, struct bh_wait_queue, wait);
-	if (wq->bh != bh || buffer_locked(bh))
-		return 0;
-	else
-		return autoremove_wake_function(wait, mode, sync, key);
-}
-
 static void sync_buffer(struct buffer_head *bh)
 {
 	struct block_device *bd;
@@ -126,7 +92,7 @@ static void sync_buffer(struct buffer_head *bh)
 void fastcall __lock_buffer(struct buffer_head *bh)
 {
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_BH_WAIT_EXCLUSIVE(wait, bh);
+	DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Lock);
 
 	do {
 		prepare_to_wait_exclusive(wqh, &wait.wait,
@@ -155,15 +121,13 @@ void fastcall unlock_buffer(struct buffer_head *bh)
 void __wait_on_buffer(struct buffer_head * bh)
 {
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_BH_WAIT(wait, bh);
+	DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Lock);
 
-	do {
 	prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
 	if (buffer_locked(bh)) {
 		sync_buffer(bh);
 		io_schedule();
 	}
-	} while (buffer_locked(bh));
 	finish_wait(wqh, &wait.wait);
 }
@@ -37,6 +37,16 @@ struct __wait_queue {
 	struct list_head task_list;
 };
 
+struct wait_bit_key {
+	void *flags;
+	int bit_nr;
+};
+
+struct wait_bit_queue {
+	struct wait_bit_key key;
+	wait_queue_t wait;
+};
+
 struct __wait_queue_head {
 	spinlock_t lock;
 	struct list_head task_list;
@@ -63,6 +73,9 @@ typedef struct __wait_queue_head wait_queue_head_t;
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
 	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
 
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
+	{ .flags = word, .bit_nr = bit, }
+
 static inline void init_waitqueue_head(wait_queue_head_t *q)
 {
 	q->lock = SPIN_LOCK_UNLOCKED;
@@ -125,6 +138,7 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
 extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
 
 #define wake_up(x)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_nr(x, nr)	__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
@@ -300,6 +314,7 @@ void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
 						wait_queue_t *wait, int state));
 void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
 #define DEFINE_WAIT(name)						\
 	wait_queue_t name = {						\
@@ -310,6 +325,17 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
 		},							\
 	}
 
+#define DEFINE_WAIT_BIT(name, word, bit)				\
+	struct wait_bit_queue name = {					\
+		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
+		.wait	= {						\
+			.task	= current,				\
+			.func	= wake_bit_function,			\
+			.task_list =					\
+				LIST_HEAD_INIT(name.wait.task_list),	\
+		},							\
+	}
+
 #define init_wait(wait)							\
 	do {								\
 		wait->task = current;					\
@@ -127,3 +127,26 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
 	return ret;
 }
 EXPORT_SYMBOL(autoremove_wake_function);
+
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue *wait_bit
+		= container_of(wait, struct wait_bit_queue, wait);
+
+	if (wait_bit->key.flags != key->flags ||
+			wait_bit->key.bit_nr != key->bit_nr ||
+			test_bit(key->bit_nr, key->flags))
+		return 0;
+	else
+		return autoremove_wake_function(wait, mode, sync, key);
+}
+EXPORT_SYMBOL(wake_bit_function);
+
+void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+{
+	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+}
+EXPORT_SYMBOL(__wake_up_bit);
@@ -360,40 +360,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-struct page_wait_queue {
-	struct page *page;
-	int bit;
-	wait_queue_t wait;
-};
-
-static int page_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
-	struct page *page = key;
-	struct page_wait_queue *wq;
-
-	wq = container_of(wait, struct page_wait_queue, wait);
-	if (wq->page != page || test_bit(wq->bit, &page->flags))
-		return 0;
-	else
-		return autoremove_wake_function(wait, mode, sync, NULL);
-}
-
-#define __DEFINE_PAGE_WAIT(name, p, b, f)				\
-	struct page_wait_queue name = {					\
-		.page	= p,						\
-		.bit	= b,						\
-		.wait	= {						\
-			.task	= current,				\
-			.func	= page_wake_function,			\
-			.flags	= f,					\
-			.task_list = LIST_HEAD_INIT(name.wait.task_list),\
-		},							\
-	}
-#define DEFINE_PAGE_WAIT(name, p, b)	__DEFINE_PAGE_WAIT(name, p, b, 0)
-#define DEFINE_PAGE_WAIT_EXCLUSIVE(name, p, b)				\
-		__DEFINE_PAGE_WAIT(name, p, b, WQ_FLAG_EXCLUSIVE)
-
 static wait_queue_head_t *page_waitqueue(struct page *page)
 {
 	const struct zone *zone = page_zone(page);
@@ -401,27 +367,16 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
 	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }
 
-static void wake_up_page(struct page *page)
-{
-	const unsigned int mode = TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE;
-	wait_queue_head_t *waitqueue = page_waitqueue(page);
-
-	if (waitqueue_active(waitqueue))
-		__wake_up(waitqueue, mode, 1, page);
-}
-
 void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *waitqueue = page_waitqueue(page);
-	DEFINE_PAGE_WAIT(wait, page, bit_nr);
+	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
-	do {
 	prepare_to_wait(waitqueue, &wait.wait, TASK_UNINTERRUPTIBLE);
 	if (test_bit(bit_nr, &page->flags)) {
 		sync_page(page);
 		io_schedule();
 	}
-	} while (test_bit(bit_nr, &page->flags));
 	finish_wait(waitqueue, &wait.wait);
 }
@@ -448,7 +403,7 @@ void fastcall unlock_page(struct page *page)
 	if (!TestClearPageLocked(page))
 		BUG();
 	smp_mb__after_clear_bit();
-	wake_up_page(page);
+	__wake_up_bit(page_waitqueue(page), &page->flags, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
@@ -464,7 +419,7 @@ void end_page_writeback(struct page *page)
 			BUG();
 		smp_mb__after_clear_bit();
 	}
-	wake_up_page(page);
+	__wake_up_bit(page_waitqueue(page), &page->flags, PG_writeback);
 }
 EXPORT_SYMBOL(end_page_writeback);
@@ -480,7 +435,7 @@ EXPORT_SYMBOL(end_page_writeback);
 void fastcall __lock_page(struct page *page)
 {
 	wait_queue_head_t *wqh = page_waitqueue(page);
-	DEFINE_PAGE_WAIT_EXCLUSIVE(wait, page, PG_locked);
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
 	while (TestSetPageLocked(page)) {
 		prepare_to_wait_exclusive(wqh, &wait.wait, TASK_UNINTERRUPTIBLE);
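As a closing illustration (again not part of the commit, and assuming a struct page *page and a struct buffer_head *bh in scope), the same declaration pattern now covers both subsystems; only the word pointer and the bit number differ, and wake_bit_function() uses exactly that pair, plus a check that the bit is actually clear, to decide whether a given waiter should be woken even when unrelated waiters hash to the same waitqueue head.

	/* Hypothetical side-by-side usage of the unified declaration. */
	DEFINE_WAIT_BIT(page_wait, &page->flags, PG_locked);	/* page waiter */
	DEFINE_WAIT_BIT(bh_wait, &bh->b_state, BH_Lock);	/* buffer_head waiter */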