Commit 07416d29 authored by Jens Axboe

cfq-iosched: fix RCU race in the cfq io_context destructor handling

put_io_context() drops the RCU read lock before calling into cfq_dtor(),
however we need to hold off freeing there before grabbing and
dereferencing the first object on the list.

So extend the rcu_read_lock() scope to cover the calling of cfq_dtor(),
and optimize cfq_free_io_context() to use a new variant for
call_for_each_cic() that assumes the RCU read lock is already held.

Hit in the wild by Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent aa94b537
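
For context, the destructor path the message refers to looks roughly like the sketch below. This is a paraphrase for illustration, not the exact source of this commit: cfq_dtor() grabs the first cfq_io_context hanging off the io_context's cic_list and invokes its destructor, and since cic objects are freed through RCU, that first-element dereference is only safe inside an RCU read-side critical section.

/*
 * Rough paraphrase of cfq_dtor() from block/blk-ioc.c, for illustration
 * only -- field and helper names are approximate.
 */
static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		/*
		 * Dereference the first cic on the list and run its
		 * destructor. The cic can be freed via RCU by another
		 * path, so this must happen under rcu_read_lock() --
		 * which the old put_io_context() had already dropped.
		 */
		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
				  cic_list);
		cic->dtor(ioc);
	}
}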
block/blk-ioc.c
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		rcu_read_unlock();
 		cfq_dtor(ioc);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 		return 1;
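
The resulting teardown ordering in put_io_context() is summarized below, with the surrounding refcount handling omitted; both scheduler destructors now run before the RCU read lock is released:

	/* Simplified view of the fixed put_io_context() teardown path */
	rcu_read_lock();
	if (ioc->aic && ioc->aic->dtor)
		ioc->aic->dtor(ioc->aic);	/* AS destructor */
	cfq_dtor(ioc);				/* CFQ destructor: the first-cic
						 * dereference is now inside the
						 * read-side critical section */
	rcu_read_unlock();

	kmem_cache_free(iocontext_cachep, ioc);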
block/cfq-iosched.c
@@ -1142,19 +1142,26 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	kmem_cache_free(cfq_pool, cfqq);
 }
 
-/*
- * Call func for each cic attached to this ioc.
- */
 static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
+__call_for_each_cic(struct io_context *ioc,
+		    void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
-	rcu_read_lock();
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
+}
+
+/*
+ * Call func for each cic attached to this ioc.
+ */
+static void
+call_for_each_cic(struct io_context *ioc,
+		  void (*func)(struct io_context *, struct cfq_io_context *))
+{
+	rcu_read_lock();
+	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -1198,7 +1205,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	call_for_each_cic(ioc, cic_free_func);
+	__call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
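
cfq_free_io_context() can use the unlocked variant because, after the blk-ioc.c change above, it is reached from put_io_context() through cfq_dtor(), i.e. already inside the extended RCU read-side critical section. A rough picture of the call chain as inferred from the commit message (the cic->dtor hookup is an assumption, not shown in this diff):

/*
 * put_io_context(ioc)
 *   rcu_read_lock();
 *     cfq_dtor(ioc)                               -- first-cic dereference
 *       cic->dtor(ioc)  [cfq_free_io_context]     -- assumed hookup
 *         __call_for_each_cic(ioc, cic_free_func) -- lock already held
 *   rcu_read_unlock();
 *
 * Other callers keep using call_for_each_cic(), which wraps
 * __call_for_each_cic() in rcu_read_lock()/rcu_read_unlock() itself.
 */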