Commit 3c96cb32 authored by Tejun Heo, committed by Jens Axboe

blkcg: drop stuff unused after per-queue policy activation update

* All_q_list is unused.  Drop all_q_{mutex|list}.

* @for_root of blkg_lookup_create() is always %false when called from
  outside blk-cgroup.c proper.  Factor out __blkg_lookup_create() so
  that it doesn't check whether @q is bypassing and use the
  underscored version for the @for_root callsite.

* blkg_destroy_all() is used only from blkcg proper and @destroy_root
  is always %true.  Make it static and drop @destroy_root.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a2b1693b
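
Before the diff, a minimal standalone C sketch of the split described in the second bullet above (stub types and a NULL-for-error convention stand in for the kernel structures and ERR_PTR(); lookup and allocation are elided, so this is a shape illustration, not the kernel code). The exported entry point keeps the bypass check, while the new static helper skips it so blk-cgroup.c proper can still instantiate the root blkg on a bypassing queue.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel types. */
struct request_queue { bool bypassing; };
struct blkio_cgroup  { int id; };
struct blkio_group   { struct blkio_cgroup *blkcg; };

static struct blkio_group root_blkg;

/* Internal-only helper: no bypass check.  The real function also does
 * the hash lookup, allocation and insertion under queue_lock. */
static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
                                                struct request_queue *q)
{
        (void)q;
        root_blkg.blkcg = blkcg;
        return &root_blkg;
}

/* Exported entry point: everything outside blk-cgroup.c comes through
 * here and is refused while the queue is bypassing (the kernel returns
 * ERR_PTR(-EBUSY) or ERR_PTR(-EINVAL); this sketch just returns NULL). */
struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
                                       struct request_queue *q)
{
        if (q->bypassing)
                return NULL;
        return __blkg_lookup_create(blkcg, q);
}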
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -25,8 +25,6 @@
 #define MAX_KEY_LEN 100
 
 static DEFINE_MUTEX(blkcg_pol_mutex);
-static DEFINE_MUTEX(all_q_mutex);
-static LIST_HEAD(all_q_list);
 
 struct blkio_cgroup blkio_root_cgroup = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -179,9 +177,8 @@ struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
-struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root)
+static struct blkio_group *__blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                                struct request_queue *q)
         __releases(q->queue_lock) __acquires(q->queue_lock)
 {
         struct blkio_group *blkg;
@@ -189,13 +186,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         WARN_ON_ONCE(!rcu_read_lock_held());
         lockdep_assert_held(q->queue_lock);
 
-        /*
-         * This could be the first entry point of blkcg implementation and
-         * we shouldn't allow anything to go through for a bypassing queue.
-         */
-        if (unlikely(blk_queue_bypass(q)) && !for_root)
-                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-
         blkg = __blkg_lookup(blkcg, q);
         if (blkg)
                 return blkg;
@@ -223,6 +213,18 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 out:
         return blkg;
 }
+
+struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
+                                       struct request_queue *q)
+{
+        /*
+         * This could be the first entry point of blkcg implementation and
+         * we shouldn't allow anything to go through for a bypassing queue.
+         */
+        if (unlikely(blk_queue_bypass(q)))
+                return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+        return __blkg_lookup_create(blkcg, q);
+}
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
 static void blkg_destroy(struct blkio_group *blkg)
@@ -249,12 +251,10 @@ static void blkg_destroy(struct blkio_group *blkg)
 /**
  * blkg_destroy_all - destroy all blkgs associated with a request_queue
  * @q: request_queue of interest
- * @destroy_root: whether to destroy root blkg or not
  *
- * Destroy blkgs associated with @q.  If @destroy_root is %true, all are
- * destroyed; otherwise, root blkg is left alone.
+ * Destroy all blkgs associated with @q.
  */
-void blkg_destroy_all(struct request_queue *q, bool destroy_root)
+static void blkg_destroy_all(struct request_queue *q)
 {
         struct blkio_group *blkg, *n;
@@ -263,10 +263,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
         list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                 struct blkio_cgroup *blkcg = blkg->blkcg;
 
-                /* skip root? */
-                if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
-                        continue;
-
                 spin_lock(&blkcg->lock);
                 blkg_destroy(blkg);
                 spin_unlock(&blkcg->lock);
@@ -274,7 +270,6 @@ void blkg_destroy_all(struct request_queue *q, bool destroy_root)
 
         spin_unlock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(blkg_destroy_all);
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
 {
@@ -492,7 +487,7 @@ int blkg_conf_prep(struct blkio_cgroup *blkcg,
         spin_lock_irq(disk->queue->queue_lock);
 
         if (blkcg_policy_enabled(disk->queue, pol))
-                blkg = blkg_lookup_create(blkcg, disk->queue, false);
+                blkg = blkg_lookup_create(blkcg, disk->queue);
         else
                 blkg = ERR_PTR(-EINVAL);
@@ -625,20 +620,9 @@ static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
  */
 int blkcg_init_queue(struct request_queue *q)
 {
-        int ret;
-
         might_sleep();
 
-        ret = blk_throtl_init(q);
-        if (ret)
-                return ret;
-
-        mutex_lock(&all_q_mutex);
-        INIT_LIST_HEAD(&q->all_q_node);
-        list_add_tail(&q->all_q_node, &all_q_list);
-        mutex_unlock(&all_q_mutex);
-
-        return 0;
+        return blk_throtl_init(q);
 }
 
 /**
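
With the all_q_list registration gone, nothing follows the blk_throtl_init() call in blkcg_init_queue(), so the ret/if/return-0 dance collapses into a direct return, as the hunk above shows. A standalone before/after sketch (blk_throtl_init() stubbed out; hypothetical function names):

struct request_queue;

/* Stub standing in for the kernel's blk_throtl_init(). */
static int blk_throtl_init(struct request_queue *q) { (void)q; return 0; }

/* Before: the error check only existed to fall through to the
 * now-deleted all_q_list bookkeeping. */
static int init_before(struct request_queue *q)
{
        int ret = blk_throtl_init(q);

        if (ret)
                return ret;
        /* ...all_q_mutex/all_q_list registration used to live here... */
        return 0;
}

/* After: the callee's result is the whole story. */
static int init_after(struct request_queue *q)
{
        return blk_throtl_init(q);
}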
@@ -662,12 +646,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-        mutex_lock(&all_q_mutex);
-        list_del_init(&q->all_q_node);
-        mutex_unlock(&all_q_mutex);
-
-        blkg_destroy_all(q, true);
-
+        blkg_destroy_all(q);
         blk_throtl_exit(q);
 }
@@ -741,7 +720,7 @@ int blkcg_activate_policy(struct request_queue *q,
         spin_lock_irq(q->queue_lock);
 
         rcu_read_lock();
-        blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
+        blkg = __blkg_lookup_create(&blkio_root_cgroup, q);
         rcu_read_unlock();
 
         if (IS_ERR(blkg)) {
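
Note on this callsite: blkcg_activate_policy() runs while the queue is bypassing (the parent commit a2b1693b puts the queue into bypass mode around policy activation), so it must call __blkg_lookup_create() directly; the exported wrapper would bounce it with -EBUSY. This replaces the old @for_root=true special case.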
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -115,7 +115,6 @@ extern int blkcg_activate_policy(struct request_queue *q,
                                  const struct blkio_policy_type *pol);
 extern void blkcg_deactivate_policy(struct request_queue *q,
                                     const struct blkio_policy_type *pol);
-extern void blkg_destroy_all(struct request_queue *q, bool destroy_root);
 
 void blkcg_print_blkgs(struct seq_file *sf, struct blkio_cgroup *blkcg,
                        u64 (*prfill)(struct seq_file *, void *, int),
@@ -334,8 +333,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkio_policy_type *pol) { return 0; }
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                            const struct blkio_policy_type *pol) { }
-static inline void blkg_destroy_all(struct request_queue *q,
-                                    bool destory_root) { }
 
 static inline void *blkg_to_pdata(struct blkio_group *blkg,
                                   struct blkio_policy_type *pol) { return NULL; }
@@ -354,8 +351,7 @@ extern struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio);
 extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
                                        struct request_queue *q);
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
-                                       struct request_queue *q,
-                                       bool for_root);
+                                       struct request_queue *q);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -285,7 +285,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
 
                 /* if %NULL and @q is alive, fall back to root_tg */
                 if (!IS_ERR(blkg))
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1348,7 +1348,7 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
         } else {
                 struct blkio_group *blkg;
 
-                blkg = blkg_lookup_create(blkcg, q, false);
+                blkg = blkg_lookup_create(blkcg, q);
                 if (!IS_ERR(blkg))
                         cfqg = blkg_to_cfqg(blkg);
         }