Commit 2a7f1244 authored by Tejun Heo, committed by Jens Axboe

blkcg: move rcu_read_lock() outside of blkio_group get functions

rcu_read_lock() in throtl_get_tg() and cfq_get_cfqg() holds onto
@blkcg while looking up blkg.  For API cleanup, the next patch will
make the caller responsible for determining the @blkcg to look the
blkg up from and let it specify that @blkcg as a parameter.  Move the
rcu read locking out to the callers to prepare for the change.

-v2: Originally this patch was described as a fix for an RCU read
     locking bug around @blkg, which Vivek pointed out to be incorrect.
     The error came from misreading the rcu locking here as protecting
     @blkg rather than @blkcg.  Patch description updated.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 72e06c25
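The shape of the change, for readers skimming the diff: blk_throtl_bio() and cfq_find_alloc_queue() now take rcu_read_lock() themselves and hold it across the blkg get functions, instead of the get functions locking and unlocking internally. Below is a minimal, self-contained userspace sketch of that structural move only; struct group, the group_get*() functions and the fake_rcu_*() helpers are invented stand-ins for illustration, not kernel code.

#include <stdio.h>

/* Stand-ins for rcu_read_lock()/rcu_read_unlock(); illustration only. */
static int read_lock_depth;
static void fake_rcu_read_lock(void)   { read_lock_depth++; }
static void fake_rcu_read_unlock(void) { read_lock_depth--; }

struct group { const char *name; };
static struct group root_group = { "root" };

/* Old shape: the get function opens and closes the read side itself. */
static struct group *group_get_locks_inside(void)
{
	struct group *g;

	fake_rcu_read_lock();
	g = &root_group;		/* lookup under the read lock */
	fake_rcu_read_unlock();
	return g;			/* caller uses g with no read lock held */
}

/* New shape: the caller is expected to already hold the read lock. */
static struct group *group_get(void)
{
	return &root_group;		/* bare lookup */
}

int main(void)
{
	struct group *g;

	g = group_get_locks_inside();
	printf("old: %s used at depth %d\n", g->name, read_lock_depth);

	/*
	 * After the patch the caller owns the critical section, so the
	 * cgroup it resolved stays pinned for the whole lookup, and a
	 * later patch can pass that cgroup in as an explicit parameter.
	 */
	fake_rcu_read_lock();
	g = group_get();
	printf("new: %s used at depth %d\n", g->name, read_lock_depth);
	fake_rcu_read_unlock();
	return 0;
}

The sketch only demonstrates where the read-side bracket lives relative to the lookup; the actual locking rules are the ones in the hunks below.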
block/blk-throttle.c

@@ -313,25 +313,23 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
-	if (tg) {
-		rcu_read_unlock();
+	if (tg)
 		return tg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
 	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
 
 	tg = throtl_alloc_tg(td);
 
 	/* Group allocated and queue is still alive. take the lock */
+	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
@@ -343,7 +341,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 
 	/*
@@ -354,7 +351,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 
 	if (__tg) {
 		kfree(tg);
-		rcu_read_unlock();
 		return __tg;
 	}
 
@@ -365,7 +361,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	throtl_init_add_tg_lists(td, tg, blkcg);
-	rcu_read_unlock();
 	return tg;
 }
 
@@ -1150,7 +1145,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * basic fields like stats and io rates. If a group has no rules,
 	 * just update the dispatch stats in lockless manner and return.
 	 */
-
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -1160,11 +1154,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		if (tg_no_rule_group(tg, rw)) {
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
-			rcu_read_unlock();
-			goto out;
+			goto out_unlock_rcu;
 		}
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Either group has not been allocated yet or it is not an unlimited
@@ -1222,6 +1214,8 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+	rcu_read_unlock();
 out:
 	return throttled;
 }
block/cfq-iosched.c

@@ -1128,13 +1128,10 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
-	if (cfqg) {
-		rcu_read_unlock();
+	if (cfqg)
 		return cfqg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
@@ -1164,7 +1161,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 
 	if (__cfqg) {
 		kfree(cfqg);
-		rcu_read_unlock();
 		return __cfqg;
 	}
 
@@ -1172,7 +1168,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 		cfqg = &cfqd->root_group;
 
 	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
-	rcu_read_unlock();
 	return cfqg;
 }
 
@@ -2870,6 +2865,8 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 	struct cfq_group *cfqg;
 
 retry:
+	rcu_read_lock();
+
 	cfqg = cfq_get_cfqg(cfqd);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
@@ -2885,6 +2882,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 			cfqq = new_cfqq;
 			new_cfqq = NULL;
 		} else if (gfp_mask & __GFP_WAIT) {
+			rcu_read_unlock();
 			spin_unlock_irq(cfqd->queue->queue_lock);
 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
 					gfp_mask | __GFP_ZERO,
@@ -2910,6 +2908,7 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
+	rcu_read_unlock();
 	return cfqq;
 }
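Both get functions above keep the alloc-outside-the-lock pattern that the in-code comment describes: if the group is missing, drop the locks before the blocking allocation, retake them, and re-check whether another task installed a group in the meantime. Here is a rough, self-contained sketch of that pattern with a pthread mutex standing in for queue_lock; struct group, group_get() and the single the_group slot are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct group { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *the_group;		/* protected by @lock */

static struct group *group_get(int id)
{
	struct group *g, *new_g;

	pthread_mutex_lock(&lock);
	if (the_group) {			/* fast path: group already exists */
		g = the_group;
		pthread_mutex_unlock(&lock);
		return g;
	}
	pthread_mutex_unlock(&lock);		/* drop the lock: allocation may block */

	new_g = malloc(sizeof(*new_g));
	if (!new_g)
		return NULL;
	new_g->id = id;

	pthread_mutex_lock(&lock);		/* retake the lock and re-check for a racer */
	if (the_group) {
		free(new_g);			/* lost the race: use the winner's group */
		g = the_group;
	} else {
		the_group = new_g;		/* won the race: install ours */
		g = new_g;
	}
	pthread_mutex_unlock(&lock);
	return g;
}

int main(void)
{
	struct group *g = group_get(1);

	printf("got group %d\n", g ? g->id : -1);
	free(the_group);
	return 0;
}

The re-check after retaking the lock is what makes the kfree() of the losing allocation in the hunks above safe against concurrent callers.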