Commit d23b5c57 authored by Yafang Shao, committed by Tejun Heo

cgroup: Make operations on the cgroup root_list RCU safe

At present, operations on the cgroup root_list require holding cgroup_mutex,
a relatively heavyweight lock. These operations can instead be made RCU-safe,
eliminating the need to hold cgroup_mutex during traversal. The list is
modified only on the cgroup root setup and destroy paths, which should be
infrequent in a production environment, whereas traversal may occur
frequently. Making the list RCU-safe is therefore beneficial.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 96a2b48e
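
The patch follows the classic RCU-protected list pattern: writers still serialize among themselves but switch to the _rcu list primitives, freeing is deferred past a grace period with kfree_rcu(), and readers can then walk the list under rcu_read_lock() alone. A minimal, self-contained sketch of that pattern is below; the demo_root/demo_roots/demo_lock names are illustrative stand-ins, not the kernel's actual cgroup symbols.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Stand-in for struct cgroup_root: list linkage plus an rcu_head so
 * the object can be freed only after a grace period has elapsed. */
struct demo_root {
	struct list_head node;
	struct rcu_head rcu;
	int id;
};

static LIST_HEAD(demo_roots);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

/* Writer side: publish a new entry with the RCU list primitive. */
static void demo_add(struct demo_root *root)
{
	spin_lock(&demo_lock);
	list_add_rcu(&root->node, &demo_roots);
	spin_unlock(&demo_lock);
}

/* Writer side: unlink with list_del_rcu(), then let kfree_rcu()
 * defer the free until all pre-existing RCU readers are done. */
static void demo_del(struct demo_root *root)
{
	spin_lock(&demo_lock);
	list_del_rcu(&root->node);
	spin_unlock(&demo_lock);
	kfree_rcu(root, rcu);
}

/* Reader side: no mutex, only an RCU read-side critical section. */
static bool demo_find(int id)
{
	struct demo_root *root;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(root, &demo_roots, node) {
		if (root->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
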
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -563,6 +563,7 @@ struct cgroup_root {

 	/* A list running through the active hierarchies */
 	struct list_head root_list;
+	struct rcu_head rcu;

 	/* Hierarchy-specific flags */
 	unsigned int flags;
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots;

 /* iterate across the hierarchies */
 #define for_each_root(root)						\
-	list_for_each_entry((root), &cgroup_roots, root_list)
+	list_for_each_entry_rcu((root), &cgroup_roots, root_list,	\
+				lockdep_is_held(&cgroup_mutex))

 /**
  * for_each_subsys - iterate all enabled cgroup subsystems
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1315,7 +1315,7 @@ static void cgroup_exit_root_id(struct cgroup_root *root)

 void cgroup_free_root(struct cgroup_root *root)
 {
-	kfree(root);
+	kfree_rcu(root, rcu);
 }

 static void cgroup_destroy_root(struct cgroup_root *root)
@@ -1348,7 +1348,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
 	spin_unlock_irq(&css_set_lock);

 	WARN_ON_ONCE(list_empty(&root->root_list));
-	list_del(&root->root_list);
+	list_del_rcu(&root->root_list);
 	cgroup_root_count--;

 	if (!have_favordynmods)
@@ -1389,7 +1389,15 @@ static inline struct cgroup *__cset_cgroup_from_root(struct css_set *cset,
 		}
 	}

-	BUG_ON(!res_cgroup);
+	/*
+	 * If cgroup_mutex is not held, the cgrp_cset_link will be freed
+	 * before we remove the cgroup root from the root_list. Consequently,
+	 * when accessing a cgroup root, the cset_link may have already been
+	 * freed, resulting in a NULL res_cgroup. However, by holding the
+	 * cgroup_mutex, we ensure that res_cgroup can't be NULL.
+	 * If we don't hold cgroup_mutex in the caller, we must do the NULL
+	 * check.
+	 */
 	return res_cgroup;
 }

@@ -1448,7 +1456,6 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
 					    struct cgroup_root *root)
 {
-	lockdep_assert_held(&cgroup_mutex);
 	lockdep_assert_held(&css_set_lock);

 	return __cset_cgroup_from_root(cset, root);
@@ -1456,7 +1463,9 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,

 /*
  * Return the cgroup for "task" from the given hierarchy. Must be
- * called with cgroup_mutex and css_set_lock held.
+ * called with css_set_lock held to prevent task's groups from being modified.
+ * Must be called with either cgroup_mutex or rcu read lock to prevent the
+ * cgroup root from being destroyed.
  */
 struct cgroup *task_cgroup_from_root(struct task_struct *task,
 				     struct cgroup_root *root)
@@ -2031,7 +2040,7 @@ void init_cgroup_root(struct cgroup_fs_context *ctx)
 	struct cgroup_root *root = ctx->root;
 	struct cgroup *cgrp = &root->cgrp;

-	INIT_LIST_HEAD(&root->root_list);
+	INIT_LIST_HEAD_RCU(&root->root_list);
 	atomic_set(&root->nr_cgrps, 1);
 	cgrp->root = root;
 	init_cgroup_housekeeping(cgrp);
@@ -2114,7 +2123,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
 	 * care of subsystems' refcounts, which are explicitly dropped in
 	 * the failure exit path.
 	 */
-	list_add(&root->root_list, &cgroup_roots);
+	list_add_rcu(&root->root_list, &cgroup_roots);
 	cgroup_root_count++;

 	/*
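
With the root_list RCU-safe, a path that cannot take cgroup_mutex can walk the hierarchies under rcu_read_lock() and handle the NULL return that the new __cset_cgroup_from_root() comment describes. The sketch below is a hypothetical caller, not part of this patch; it assumes the kernel/cgroup/cgroup.c context above, and lookup_task_cgroup_id() is an invented name for illustration.

/* Hypothetical lockless lookup: find the hierarchy by id under RCU
 * instead of cgroup_mutex, and tolerate a NULL result from
 * task_cgroup_from_root(), which is possible now that the BUG_ON()
 * has been removed. Returns 0 if no cgroup was found. */
static u64 lookup_task_cgroup_id(struct task_struct *task, int hierarchy_id)
{
	struct cgroup_root *root;
	u64 id = 0;

	rcu_read_lock();
	for_each_root(root) {
		struct cgroup *cgrp;

		if (root->hierarchy_id != hierarchy_id)
			continue;

		spin_lock_irq(&css_set_lock);
		cgrp = task_cgroup_from_root(task, root);
		/* Without cgroup_mutex the cset link may already be
		 * gone, so cgrp can be NULL here; check before use. */
		if (cgrp)
			id = cgroup_id(cgrp);
		spin_unlock_irq(&css_set_lock);
		break;
	}
	rcu_read_unlock();

	return id;
}

Note that all dereferences happen inside the RCU read-side critical section (and under css_set_lock where required), so the cgroup root cannot be freed out from under the reader.
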