Commit 56fde9e0 authored by Tejun Heo

cgroup: enable task_cg_lists on the first cgroup mount

Tasks are not linked on their css_sets until cgroup task iteration is
actually used.  This is to avoid incurring overhead on the fork and
exit paths for systems which have cgroup compiled in but don't use it.
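
For illustration, the fork-path side of this lazy binding looks roughly
like the following (a simplified sketch of this era's cgroup_post_fork(),
not a verbatim copy); while use_task_css_set_links is false, a forking
task never touches the css_set lists at all:

	/* sketch: the fork path links the child only once cg_lists are on */
	void cgroup_post_fork(struct task_struct *child)
	{
		if (use_task_css_set_links) {
			write_lock(&css_set_lock);
			task_lock(child);
			/* skip if something already put the child on a list */
			if (list_empty(&child->cg_list))
				list_add(&child->cg_list,
					 &task_css_set(child)->tasks);
			task_unlock(child);
			write_unlock(&css_set_lock);
		}
	}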
     
This lazy binding also affects the task migration path.  It has to be
careful so that it doesn't link tasks to css_sets when task_cg_lists
linking is not enabled yet.  Unfortunately, this conditional linking
in the migration path interferes with planned migration updates.

This patch moves the lazy binding a bit earlier, to the first cgroup
mount.  It's a clear indication that cgroup is being used on the
system and task_cg_lists linking is highly likely to be enabled soon
anyway through "tasks" and "cgroup.procs" files.

This allows cgroup_task_migrate() to always link @tsk->cg_list.  Note
that it may still race with cgroup_post_fork() but who wins that race
is inconsequential.
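
Concretely, both paths serialize on css_set_lock and the fork path backs
off when the task is already linked, so either interleaving leaves @tsk
on the correct list (sketch, not verbatim kernel code):

	/* migration side, after this patch: relink unconditionally */
	write_lock(&css_set_lock);
	list_move(&tsk->cg_list, &new_cset->tasks);
	write_unlock(&css_set_lock);

	/* fork side: link only if the task isn't on a list yet */
	write_lock(&css_set_lock);
	if (list_empty(&child->cg_list))
		list_add(&child->cg_list, &task_css_set(child)->tasks);
	write_unlock(&css_set_lock);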

While at it, make use_task_css_set_links a bool, add sanity checks in
cgroup_enable_task_cg_lists() and css_task_iter_start(), and update
the former so that it's guaranteed to run only once.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent 35585573
@@ -173,6 +173,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
 			      bool is_add);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
+static void cgroup_enable_task_cg_lists(void);
 
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
@@ -375,7 +376,7 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
  * fork()/exit() overhead for people who have cgroups compiled into their
  * kernel but not actually in use.
  */
-static int use_task_css_set_links __read_mostly;
+static bool use_task_css_set_links __read_mostly;
 
 static void __put_css_set(struct css_set *cset, int taskexit)
 {
@@ -1441,6 +1442,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	struct cgroup_sb_opts opts;
 	struct dentry *dentry;
 	int ret;
 
+	/*
+	 * The first time anyone tries to mount a cgroup, enable the list
+	 * linking each css_set to its tasks and fix up all existing tasks.
+	 */
+	if (!use_task_css_set_links)
+		cgroup_enable_task_cg_lists();
+
 retry:
 	mutex_lock(&cgroup_tree_mutex);
 	mutex_lock(&cgroup_mutex);
@@ -1692,9 +1700,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	rcu_assign_pointer(tsk->cgroups, new_cset);
 	task_unlock(tsk);
 
-	/* Update the css_set linked lists if we're using them */
 	write_lock(&css_set_lock);
-	if (!list_empty(&tsk->cg_list))
-		list_move(&tsk->cg_list, &new_cset->tasks);
+	list_move(&tsk->cg_list, &new_cset->tasks);
 	write_unlock(&css_set_lock);
@@ -2362,13 +2368,19 @@ int cgroup_task_count(const struct cgroup *cgrp)
  * To reduce the fork() overhead for systems that are not actually using
  * their cgroups capability, we don't maintain the lists running through
  * each css_set to its tasks until we see the list actually used - in other
- * words after the first call to css_task_iter_start().
+ * words after the first mount.
  */
 static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
+
 	write_lock(&css_set_lock);
-	use_task_css_set_links = 1;
+
+	if (use_task_css_set_links)
+		goto out_unlock;
+
+	use_task_css_set_links = true;
+
 	/*
 	 * We need tasklist_lock because RCU is not safe against
 	 * while_each_thread(). Besides, a forking task that has passed
@@ -2379,16 +2391,22 @@ static void cgroup_enable_task_cg_lists(void)
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
 		task_lock(p);
+
+		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
+			     task_css_set(p) != &init_css_set);
+
 		/*
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
 		 */
-		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
+		if (!(p->flags & PF_EXITING))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+
 		task_unlock(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
+out_unlock:
 	write_unlock(&css_set_lock);
 }
@@ -2621,13 +2639,8 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 		  struct css_task_iter *it)
 	__acquires(css_set_lock)
 {
-	/*
-	 * The first time anyone tries to iterate across a css, we need to
-	 * enable the list linking each css_set to its tasks, and fix up
-	 * all existing tasks.
-	 */
-	if (!use_task_css_set_links)
-		cgroup_enable_task_cg_lists();
+	/* no one should try to iterate before mounting cgroups */
+	WARN_ON_ONCE(!use_task_css_set_links);
 
 	read_lock(&css_set_lock);