Commit 5acda9d1 authored by Jan Kara, committed by Linus Torvalds

bdi: avoid oops on device removal

After commit 839a8e86 ("writeback: replace custom worker pool
implementation with unbound workqueue"), when a device is removed while
we are writing to it, we crash in bdi_writeback_workfn() ->
set_worker_desc() because bdi->dev is NULL.

This can happen because even though bdi_unregister() cancels all pending
flushing work, nothing prevents new work from being queued from
balance_dirty_pages() or other places.

Fix the problem by clearing the BDI_registered bit in bdi_unregister()
and checking it before scheduling any flushing work (a minimal sketch of
this pattern follows the commit header below).

Fixes: 839a8e86 ("writeback: replace custom worker pool implementation with unbound workqueue")
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: Derek Basehore <dbasehore@chromium.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6ca738d6
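
Before the diff, here is a minimal userspace C sketch of the check-under-lock pattern the fix uses. It is an analogue, not kernel code: the names fake_bdi, fake_queue_work, and fake_unregister are hypothetical, a pthread mutex stands in for bdi->wb_lock, a bool for the BDI_registered bit, and a counter for the work list.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_bdi {
	pthread_mutex_t wb_lock; /* stands in for bdi->wb_lock */
	bool registered;         /* stands in for the BDI_registered bit */
	int pending;             /* stands in for bdi->work_list */
};

/* Analogue of bdi_queue_work(): refuse new work once unregistered. */
static bool fake_queue_work(struct fake_bdi *bdi)
{
	bool queued = false;

	pthread_mutex_lock(&bdi->wb_lock);
	if (bdi->registered) {
		bdi->pending++; /* list_add_tail() + mod_delayed_work() in the kernel */
		queued = true;
	}
	pthread_mutex_unlock(&bdi->wb_lock);
	return queued;
}

/* Analogue of bdi_wb_shutdown(): clear the flag under the lock, then drain. */
static void fake_unregister(struct fake_bdi *bdi)
{
	pthread_mutex_lock(&bdi->wb_lock);
	bdi->registered = false; /* clear_bit(BDI_registered, &bdi->state) */
	pthread_mutex_unlock(&bdi->wb_lock);
	/* ... drain bdi->pending here; no new work can arrive now ... */
}

int main(void)
{
	struct fake_bdi bdi = {
		.wb_lock = PTHREAD_MUTEX_INITIALIZER,
		.registered = true,
		.pending = 0,
	};

	printf("queued: %d\n", fake_queue_work(&bdi)); /* 1: still registered */
	fake_unregister(&bdi);
	printf("queued: %d\n", fake_queue_work(&bdi)); /* 0: refused after shutdown */
	return 0;
}

The essential point, in the kernel as in the sketch, is that the flag is cleared and tested under the same lock, so once unregistration returns, no further work can be queued.
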
fs/fs-writeback.c
@@ -89,16 +89,29 @@ static inline struct inode *wb_inode(struct list_head *head)
 #define CREATE_TRACE_POINTS
 #include <trace/events/writeback.h>
 
+static void bdi_wakeup_thread(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi->wb_lock);
+	if (test_bit(BDI_registered, &bdi->state))
+		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	spin_unlock_bh(&bdi->wb_lock);
+}
+
 static void bdi_queue_work(struct backing_dev_info *bdi,
 			   struct wb_writeback_work *work)
 {
 	trace_writeback_queue(bdi, work);
 
 	spin_lock_bh(&bdi->wb_lock);
+	if (!test_bit(BDI_registered, &bdi->state)) {
+		if (work->done)
+			complete(work->done);
+		goto out_unlock;
+	}
 	list_add_tail(&work->list, &bdi->work_list);
-	spin_unlock_bh(&bdi->wb_lock);
-
 	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+out_unlock:
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
@@ -114,7 +127,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work) {
 		trace_writeback_nowork(bdi);
-		mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+		bdi_wakeup_thread(bdi);
 		return;
 	}
@@ -161,7 +174,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi)
 	 * writeback as soon as there is no other work to do.
 	 */
	trace_writeback_wake_background(bdi);
-	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
+	bdi_wakeup_thread(bdi);
 }
 
 /*
@@ -1017,7 +1030,7 @@ void bdi_writeback_workfn(struct work_struct *work)
 	current->flags |= PF_SWAPWRITE;
 
 	if (likely(!current_is_workqueue_rescuer() ||
-		   list_empty(&bdi->bdi_list))) {
+		   !test_bit(BDI_registered, &bdi->state))) {
 		/*
 		 * The normal path.  Keep writing back @bdi until its
 		 * work_list is empty.  Note that this path is also taken
include/linux/backing-dev.h
@@ -95,7 +95,7 @@ struct backing_dev_info {
 	unsigned int max_ratio, max_prop_frac;
 
 	struct bdi_writeback wb;  /* default writeback info for this bdi */
-	spinlock_t wb_lock;	  /* protects work_list */
+	spinlock_t wb_lock;	  /* protects work_list & wb.dwork scheduling */
 
 	struct list_head work_list;
mm/backing-dev.c
@@ -297,7 +297,10 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-	queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+	spin_lock_bh(&bdi->wb_lock);
+	if (test_bit(BDI_registered, &bdi->state))
+		queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -310,9 +313,6 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi)
 	spin_unlock_bh(&bdi_lock);
 
 	synchronize_rcu_expedited();
-
-	/* bdi_list is now unused, clear it to mark @bdi dying */
-	INIT_LIST_HEAD(&bdi->bdi_list);
 }
 
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -363,6 +363,11 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	 */
 	bdi_remove_from_list(bdi);
 
+	/* Make sure nobody queues further work */
+	spin_lock_bh(&bdi->wb_lock);
+	clear_bit(BDI_registered, &bdi->state);
+	spin_unlock_bh(&bdi->wb_lock);
 
 	/*
 	 * Drain work list and shutdown the delayed_work.  At this point,
 	 * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
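
One detail of the diff worth calling out: when bdi_queue_work() refuses work, it still calls complete(work->done), because a submitter may be sleeping until the work finishes and nobody would ever wake it once the device is gone. Below is a hypothetical userspace analogue of that wait/complete pairing, using a pthread condition variable as the "completion"; the struct and function names mirror the kernel's but are reimplemented here purely for illustration.

#include <pthread.h>
#include <stdbool.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

/* Mark the completion done and wake any waiter. */
static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Block until the completion is marked done. */
static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done) /* guard against spurious wakeups */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct completion c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.done = false,
	};

	complete(&c);            /* refusal path: signal done without doing the work */
	wait_for_completion(&c); /* submitter returns instead of hanging forever */
	return 0;
}

Whether the work actually ran or was refused, the submitter's wait returns; that is why the refusal path in bdi_queue_work() must still signal completion.
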