Commit bcddc3f0 authored by Jens Axboe

writeback: inline allocation failure handling in bdi_alloc_queue_work()

This gets rid of work == NULL in bdi_queue_work() and puts the
OOM handling where it belongs.
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent cfc4ba53
...@@ -149,21 +149,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) ...@@ -149,21 +149,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{ {
if (work) { work->seen = bdi->wb_mask;
work->seen = bdi->wb_mask; BUG_ON(!work->seen);
BUG_ON(!work->seen); atomic_set(&work->pending, bdi->wb_cnt);
atomic_set(&work->pending, bdi->wb_cnt); BUG_ON(!bdi->wb_cnt);
BUG_ON(!bdi->wb_cnt);
/* /*
* Make sure stores are seen before it appears on the list * Make sure stores are seen before it appears on the list
*/ */
smp_mb(); smp_mb();
spin_lock(&bdi->wb_lock); spin_lock(&bdi->wb_lock);
list_add_tail_rcu(&work->list, &bdi->work_list); list_add_tail_rcu(&work->list, &bdi->work_list);
spin_unlock(&bdi->wb_lock); spin_unlock(&bdi->wb_lock);
}
/* /*
* If the default thread isn't there, make sure we add it. When * If the default thread isn't there, make sure we add it. When
...@@ -175,14 +173,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) ...@@ -175,14 +173,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
struct bdi_writeback *wb = &bdi->wb; struct bdi_writeback *wb = &bdi->wb;
/* /*
* If we failed allocating the bdi work item, wake up the wb * End work now if this wb has no dirty IO pending. Otherwise
* thread always. As a safety precaution, it'll flush out * wakeup the handling thread
* everything
*/ */
if (!wb_has_dirty_io(wb)) { if (!wb_has_dirty_io(wb))
if (work) wb_clear_pending(wb, work);
wb_clear_pending(wb, work); else if (wb->task)
} else if (wb->task)
wake_up_process(wb->task); wake_up_process(wb->task);
} }
} }
...@@ -202,11 +198,20 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi, ...@@ -202,11 +198,20 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
{ {
struct bdi_work *work; struct bdi_work *work;
/*
* This is WB_SYNC_NONE writeback, so if allocation fails just
* wakeup the thread for old dirty data writeback
*/
work = kmalloc(sizeof(*work), GFP_ATOMIC); work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (work) if (work) {
bdi_work_init(work, wbc); bdi_work_init(work, wbc);
bdi_queue_work(bdi, work);
} else {
struct bdi_writeback *wb = &bdi->wb;
bdi_queue_work(bdi, work); if (wb->task)
wake_up_process(wb->task);
}
} }
void bdi_start_writeback(struct writeback_control *wbc) void bdi_start_writeback(struct writeback_control *wbc)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment