Commit d8742e22 authored by Tejun Heo

cgroup: css_set_lock should nest inside tasklist_lock

cgroup_enable_task_cg_lists() incorrectly nests the non-irq-safe
tasklist_lock inside the irq-safe css_set_lock, triggering the
following lockdep warning:

  WARNING: possible irq lock inversion dependency detected
  4.17.0-rc1-00027-gb37d049 #6 Not tainted
  --------------------------------------------------------
  systemd/1 just changed the state of lock:
  00000000fe57773b (css_set_lock){..-.}, at: cgroup_free+0xf2/0x12a
  but this lock took another, SOFTIRQ-unsafe lock in the past:
   (tasklist_lock){.+.+}

  and interrupts could create inverse lock ordering between them.

  other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(tasklist_lock);
                                 local_irq_disable();
                                 lock(css_set_lock);
                                 lock(tasklist_lock);
    <Interrupt>
      lock(css_set_lock);

   *** DEADLOCK ***

The condition is highly unlikely to actually happen, especially given
that the path is executed only once per boot.  Still, the ordering is
inverted; flip it so that css_set_lock nests inside tasklist_lock.
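For illustration only, the inversion can be reproduced in userspace as a
plain ABBA ordering bug.  The sketch below is not kernel code: lock_a
stands in for tasklist_lock, lock_b for css_set_lock, and the cpu0/cpu1
threads act out the two columns of the lockdep scenario (the
<Interrupt> on CPU0 becomes cpu0's second acquisition).  All names are
invented for the example.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins, not the kernel locks themselves. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* ~ tasklist_lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* ~ css_set_lock */

/* CPU0 column: take lock_a, then lock_b (the "<Interrupt>" step). */
static void *cpu0(void *unused)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&lock_a);
		pthread_mutex_lock(&lock_b);
		pthread_mutex_unlock(&lock_b);
		pthread_mutex_unlock(&lock_a);
	}
	return NULL;
}

/* CPU1 column: take lock_b, then lock_a -- the inverse order, as
 * cgroup_enable_task_cg_lists() did before this patch. */
static void *cpu1(void *unused)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&lock_b);
		pthread_mutex_lock(&lock_a);
		pthread_mutex_unlock(&lock_a);
		pthread_mutex_unlock(&lock_b);
	}
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);	/* usually never returns: ABBA deadlock */
	pthread_join(t1, NULL);
	puts("finished without deadlocking (got lucky)");
	return 0;
}

Built with "cc -pthread", the two threads typically wedge within a few
iterations; that circular wait is exactly the cycle lockdep predicts
statically from the lock classes, without needing it to happen.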
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Boqun Feng <boqun.feng@gmail.com>
parent cc659e76
@@ -1798,13 +1798,6 @@ static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 
-	spin_lock_irq(&css_set_lock);
-
-	if (use_task_css_set_links)
-		goto out_unlock;
-
-	use_task_css_set_links = true;
-
 	/*
 	 * We need tasklist_lock because RCU is not safe against
 	 * while_each_thread(). Besides, a forking task that has passed
@@ -1813,6 +1806,13 @@ static void cgroup_enable_task_cg_lists(void)
 	 * tasklist if we walk through it with RCU.
 	 */
 	read_lock(&tasklist_lock);
+	spin_lock_irq(&css_set_lock);
+
+	if (use_task_css_set_links)
+		goto out_unlock;
+
+	use_task_css_set_links = true;
+
 	do_each_thread(g, p) {
 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
 			     task_css_set(p) != &init_css_set);
@@ -1840,9 +1840,9 @@ static void cgroup_enable_task_cg_lists(void)
 		}
 		spin_unlock(&p->sighand->siglock);
 	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
 out_unlock:
 	spin_unlock_irq(&css_set_lock);
+	read_unlock(&tasklist_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
...
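In the analogue sketch from the commit message above, this patch
amounts to rewriting cpu1 so that both threads take lock_a first; once
every path that takes both locks agrees on tasklist_lock-then-
css_set_lock, no cycle is possible.  The fragment below drops into
that sketch (same invented names) and mirrors the hunks above:

static void *cpu1_fixed(void *unused)
{
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&lock_a);	/* ~ read_lock(&tasklist_lock) */
		pthread_mutex_lock(&lock_b);	/* ~ spin_lock_irq(&css_set_lock) */
		pthread_mutex_unlock(&lock_b);	/* ~ spin_unlock_irq(&css_set_lock) */
		pthread_mutex_unlock(&lock_a);	/* ~ read_unlock(&tasklist_lock) */
	}
	return NULL;
}

Note that the unlock order also flips, matching the tail of the last
hunk: the lock taken first is released last.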