Commit c9e75f04 authored by Oleg Nesterov, committed by Tejun Heo

cgroup: pids: fix race between cgroup_post_fork() and cgroup_migrate()

If the new child migrates to another cgroup before cgroup_post_fork() calls
subsys->fork(), then pids_can_attach() and pids_fork() will each do the same
pids_uncharge(old_pids) + pids_charge(pids) sequence, so the charge is moved
twice.

Change copy_process() to call threadgroup_change_begin/threadgroup_change_end
unconditionally. percpu_down_read() is cheap, and this allows other cleanups;
see the next changes.

Also, this way we can unify cgroup_threadgroup_rwsem and dup_mmap_sem.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 53254f90
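
To make the race concrete, here is a minimal userspace sketch of the double
charge/uncharge described above. The struct and helper names (pids_counter,
charge, uncharge, move_charge) are invented for illustration and are not the
kernel's API; the point is only that running the uncharge(old) + charge(new)
sequence from both the migration path and the old pids_fork() leaves the
counters skewed for a single task.

/*
 * Userspace illustration only -- not kernel code.  "old_group" holds the
 * child's charge taken at fork time; "new_group" is the cgroup it migrates
 * to before cgroup_post_fork() has run the pids ->fork() callback.
 */
#include <stdio.h>

struct pids_counter { int count; };

static void charge(struct pids_counter *c)   { c->count += 1; }
static void uncharge(struct pids_counter *c) { c->count -= 1; }

/* The uncharge(old) + charge(new) sequence that both paths perform. */
static void move_charge(struct pids_counter *from, struct pids_counter *to)
{
	uncharge(from);
	charge(to);
}

int main(void)
{
	struct pids_counter old_group = { .count = 1 };	/* charged at fork */
	struct pids_counter new_group = { .count = 0 };

	move_charge(&old_group, &new_group);	/* migration: pids_can_attach() */
	move_charge(&old_group, &new_group);	/* old pids_fork() repeats it   */

	/* Prints old=-1 new=2 -- one task, but the charge was moved twice. */
	printf("old=%d new=%d\n", old_group.count, new_group.count);
	return 0;
}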
@@ -243,27 +243,10 @@ static void pids_cancel_fork(struct task_struct *task, void *priv)
 
 static void pids_fork(struct task_struct *task, void *priv)
 {
-	struct cgroup_subsys_state *css;
-	struct cgroup_subsys_state *old_css = priv;
-	struct pids_cgroup *pids;
-	struct pids_cgroup *old_pids = css_pids(old_css);
-
-	css = task_get_css(task, pids_cgrp_id);
-	pids = css_pids(css);
-
-	/*
-	 * If the association has changed, we have to revert and reapply the
-	 * charge/uncharge on the wrong hierarchy to the current one. Since
-	 * the association can only change due to an organisation event, its
-	 * okay for us to ignore the limit in this case.
-	 */
-	if (pids != old_pids) {
-		pids_uncharge(old_pids, 1);
-		pids_charge(pids, 1);
-	}
+	struct cgroup_subsys_state *css = priv;
 
+	WARN_ON(task_css_check(task, pids_cgrp_id, true) != css);
 	css_put(css);
-	css_put(old_css);
 }
 
 static void pids_free(struct task_struct *task)
@@ -1368,8 +1368,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	p->audit_context = NULL;
-	if (clone_flags & CLONE_THREAD)
-		threadgroup_change_begin(current);
+	threadgroup_change_begin(current);
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
@@ -1610,8 +1609,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	proc_fork_connector(p);
 	cgroup_post_fork(p, cgrp_ss_priv);
-	if (clone_flags & CLONE_THREAD)
-		threadgroup_change_end(current);
+	threadgroup_change_end(current);
 	perf_event_fork(p);
 
 	trace_task_newtask(p, clone_flags);
@@ -1652,8 +1650,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-	if (clone_flags & CLONE_THREAD)
-		threadgroup_change_end(current);
+	threadgroup_change_end(current);
 	delayacct_tsk_free(p);
 bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
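
For context on what the copy_process() change buys: with the CLONE_THREAD
check gone, every fork holds cgroup_threadgroup_rwsem for read from before
cgroup_fork() until after cgroup_post_fork(), while cgroup migration takes
the same percpu rwsem for write, so the new child can no longer move to
another cgroup inside that window; that is what lets pids_fork() above
shrink to a WARN_ON() plus css_put(). Below is a rough userspace model of
that locking pattern, with a plain pthread rwlock standing in for the
percpu rwsem and the function names invented.

/*
 * Rough userspace model -- not kernel code.  A plain pthread rwlock stands
 * in for cgroup_threadgroup_rwsem (a percpu_rw_semaphore in the kernel).
 * Readers (forks) can run concurrently; a migration needs the write side,
 * so it cannot interleave with a fork window.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void fork_window(void)
{
	pthread_rwlock_rdlock(&threadgroup_rwsem);	/* threadgroup_change_begin() */
	/* ... cgroup_fork(), the rest of copy_process(), cgroup_post_fork() ... */
	pthread_rwlock_unlock(&threadgroup_rwsem);	/* threadgroup_change_end() */
}

static void migrate_task(void)
{
	pthread_rwlock_wrlock(&threadgroup_rwsem);	/* cgroup attach/migrate side */
	/* ... move the task and charge/uncharge exactly once ... */
	pthread_rwlock_unlock(&threadgroup_rwsem);
}

int main(void)
{
	fork_window();		/* no migration can slip in between */
	migrate_task();		/* runs only outside the fork window */
	puts("migration is serialized against the fork window");
	return 0;
}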