Commit d708f0d5 authored by Jens Axboe

Revert "blkcg: allocate struct blkcg_gq outside request queue spinlock"

I inadvertently applied the v5 version of this patch, whereas
the agreed upon version was v5. Revert this one so we can apply
the right one.

This reverts commit 7fc6b87a.
parent 48b99c9d
block/blk-cgroup.c

@@ -165,18 +165,16 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 
 /*
- * If gfp mask allows blocking, this function temporarily drops rcu and queue
- * locks to allocate memory.
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
  */
 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol)
+				    struct request_queue *q,
+				    struct blkcg_gq *new_blkg)
 {
-	struct blkcg_gq *blkg = NULL;
+	struct blkcg_gq *blkg;
 	struct bdi_writeback_congested *wb_congested;
 	int i, ret;
-	const bool drop_locks = gfpflags_allow_blocking(gfp);
-	bool preloaded = false;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
@@ -187,53 +185,31 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
 
-	if (drop_locks) {
-		spin_unlock_irq(q->queue_lock);
-		rcu_read_unlock();
-	}
-
 	wb_congested = wb_congested_get_create(q->backing_dev_info,
-					       blkcg->css.id, gfp);
-	blkg = blkg_alloc(blkcg, q, gfp);
-
-	if (drop_locks) {
-		preloaded = !radix_tree_preload(gfp);
-		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
-	}
-
-	if (unlikely(!wb_congested || !blkg)) {
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
+	if (!wb_congested) {
 		ret = -ENOMEM;
-		goto err_put;
+		goto err_put_css;
 	}
 
-	blkg->wb_congested = wb_congested;
-
-	if (pol) {
-		WARN_ON(!drop_locks);
-
-		if (!blkcg_policy_enabled(q, pol)) {
-			ret = -EOPNOTSUPP;
-			goto err_put;
-		}
-
-		/*
-		 * This could be the first entry point of blkcg implementation
-		 * and we shouldn't allow anything to go through for a bypassing
-		 * queue.
-		 */
-		if (unlikely(blk_queue_bypass(q))) {
-			ret = blk_queue_dying(q) ? -ENODEV : -EBUSY;
-			goto err_put;
-		}
+	/* allocate */
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
+		if (unlikely(!new_blkg)) {
+			ret = -ENOMEM;
+			goto err_put_congested;
+		}
 	}
 
+	blkg = new_blkg;
+	blkg->wb_congested = wb_congested;
+
 	/* link parent */
 	if (blkcg_parent(blkcg)) {
 		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 		if (WARN_ON_ONCE(!blkg->parent)) {
 			ret = -ENODEV;
-			goto err_put;
+			goto err_put_congested;
 		}
 		blkg_get(blkg->parent);
 	}
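For clarity, here is a minimal caller-side sketch of the contract the hunk above restores: blkg_create() is entered with the RCU read lock and q->queue_lock held, @new_blkg is always consumed, and passing NULL makes blkg_create() fall back to a GFP_NOWAIT allocation under the locks. The helper name example_make_blkg and the may_sleep split are illustrative only and assume code living in block/blk-cgroup.c (blkg_create() and blkg_alloc() are static there); they are not part of this commit.

	/* Illustrative sketch only -- not part of this commit. */
	static struct blkcg_gq *example_make_blkg(struct blkcg *blkcg,
						  struct request_queue *q,
						  bool may_sleep)
	{
		struct blkcg_gq *new_blkg = NULL;
		struct blkcg_gq *blkg;

		/* sleepable callers may preallocate with GFP_KERNEL before locking */
		if (may_sleep) {
			new_blkg = blkg_alloc(blkcg, q, GFP_KERNEL);
			if (!new_blkg)
				return ERR_PTR(-ENOMEM);
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);
		/*
		 * new_blkg == NULL: blkg_create() allocates with GFP_NOWAIT itself;
		 * either way new_blkg is consumed (freed by blkg_create() on error).
		 */
		blkg = blkg_create(blkcg, q, new_blkg);
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		return blkg;
	}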
@@ -260,9 +236,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 				pol->pd_online_fn(blkg->pd[i]);
 		}
 	}
-
-	if (preloaded)
-		radix_tree_preload_end();
 	blkg->online = true;
 	spin_unlock(&blkcg->lock);
 
@@ -273,45 +246,44 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	blkg_put(blkg);
 	return ERR_PTR(ret);
 
-err_put:
-	if (preloaded)
-		radix_tree_preload_end();
-	if (wb_congested)
-		wb_congested_put(wb_congested);
+err_put_congested:
+	wb_congested_put(wb_congested);
+err_put_css:
 	css_put(&blkcg->css);
 err_free_blkg:
-	blkg_free(blkg);
+	blkg_free(new_blkg);
 	return ERR_PTR(ret);
 }
 
 /**
- * __blkg_lookup_create - lookup blkg, try to create one if not there
+ * blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
- * @gfp: gfp mask
- * @pol: blkcg policy (optional)
  *
  * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
  * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
- * When gfp mask allows blocking, rcu and queue locks may be dropped for
- * allocating memory. In this case, the locks will be reacquired on return.
- *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-				      struct request_queue *q, gfp_t gfp,
-				      const struct blkcg_policy *pol)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
 {
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
+	/*
+	 * This could be the first entry point of blkcg implementation and
+	 * we shouldn't allow anything to go through for a bypassing queue.
+	 */
+	if (unlikely(blk_queue_bypass(q)))
+		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
 		return blkg;
@@ -329,35 +301,12 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 			parent = blkcg_parent(parent);
 		}
 
-		blkg = blkg_create(pos, q, gfp, pol);
+		blkg = blkg_create(pos, q, NULL);
 		if (pos == blkcg || IS_ERR(blkg))
 			return blkg;
 	}
 }
 
-/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
- *
- * Performs an initial queue bypass check and then passes control to
- * __blkg_lookup_create().
- */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol)
-{
-	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
-
-	/*
-	 * This could be the first entry point of blkcg implementation and
-	 * we shouldn't allow anything to go through for a bypassing queue.
-	 */
-	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
-	return __blkg_lookup_create(blkcg, q, gfp, pol);
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct blkcg *blkcg = blkg->blkcg;
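As the restored kerneldoc above notes, blkg_lookup_create() must be called with both the RCU read lock and @q->queue_lock held, and it returns either a valid blkg or an ERR_PTR() value (never NULL). A minimal hypothetical caller following the same pattern as the blkcg_bio_issue_check() hunk further below; the example_ helper name is made up for illustration and is not part of this commit.

	/* Illustrative sketch only -- not part of this commit. */
	static bool example_ensure_blkg(struct request_queue *q, struct bio *bio)
	{
		struct blkcg_gq *blkg;
		bool ok;

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);
		/* returns the existing or newly created blkg, or ERR_PTR() */
		blkg = blkg_lookup_create(bio_blkcg(bio), q);
		ok = !IS_ERR(blkg);
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		return ok;
	}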
@@ -868,7 +817,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	spin_lock_irq(disk->queue->queue_lock);
 
 	if (blkcg_policy_enabled(disk->queue, pol))
-		blkg = blkg_lookup_create(blkcg, disk->queue, GFP_KERNEL, pol);
+		blkg = blkg_lookup_create(blkcg, disk->queue);
 	else
 		blkg = ERR_PTR(-EOPNOTSUPP);
 
@@ -1107,15 +1056,30 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-	struct blkcg_gq *blkg;
+	struct blkcg_gq *new_blkg, *blkg;
+	bool preloaded;
 	int ret;
 
+	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!new_blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
+	/*
+	 * Make sure the root blkg exists and count the existing blkgs.  As
+	 * @q is bypassing at this point, blkg_lookup_create() can't be
+	 * used.  Open code insertion.
+	 */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
-	blkg = __blkg_lookup_create(&blkcg_root, q, GFP_KERNEL, NULL);
+	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg))
 		return PTR_ERR(blkg);
include/linux/blk-cgroup.h

@@ -172,8 +172,7 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 				      struct request_queue *q, bool update_hint);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q, gfp_t gfp,
-				    const struct blkcg_policy *pol);
+				    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
 void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
@@ -695,8 +694,7 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 	blkg = blkg_lookup(blkcg, q);
 	if (unlikely(!blkg)) {
 		spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q, GFP_NOWAIT | __GFP_NOWARN,
-					  NULL);
+		blkg = blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		spin_unlock_irq(q->queue_lock);