Commit 178be793 authored by Ingo Molnar

sched: do not normalize kernel threads via SysRq-N

do not normalize kernel threads via SysRq-N: the migration threads,
softlockup threads, etc. might be essential for the system to
function properly. So only zap user tasks.

Pointed out by Andi Kleen.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1666703a
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -365,15 +365,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-static inline int is_migration_thread(struct task_struct *p, struct rq *rq)
-{
-#ifdef CONFIG_SMP
-        return p == rq->migration_thread;
-#else
-        return 0;
-#endif
-}
-
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -6563,6 +6554,12 @@ void normalize_rt_tasks(void)
 
         read_lock_irq(&tasklist_lock);
         do_each_thread(g, p) {
+                /*
+                 * Only normalize user tasks:
+                 */
+                if (!p->mm)
+                        continue;
+
                 p->se.exec_start = 0;
 #ifdef CONFIG_SCHEDSTATS
                 p->se.wait_start = 0;
@@ -6584,7 +6581,6 @@ void normalize_rt_tasks(void)
 
                 spin_lock_irqsave(&p->pi_lock, flags);
                 rq = __task_rq_lock(p);
-                if (!is_migration_thread(p, rq))
-                        normalize_task(rq, p);
+                normalize_task(rq, p);
 
                 __task_rq_unlock(rq);
...
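
For context: a task whose ->mm pointer is NULL has no user address space, i.e. it is a kernel thread, so the single !p->mm test skips not only the migration threads but also the softlockup watchdogs and every other kernel thread, which is why is_migration_thread() could be deleted outright. Below is a minimal userspace sketch of that idiom; the mock task_struct layout and the sample task names are made up purely for illustration, this is not kernel code:

/*
 * Illustrative sketch only: a mock task list demonstrating the
 * "kernel threads have no mm" test used by the patch above.
 */
#include <stdio.h>
#include <stddef.h>

struct mm_struct { int dummy; };

struct task_struct {
        const char *comm;      /* task name */
        struct mm_struct *mm;  /* NULL for kernel threads */
};

int main(void)
{
        struct mm_struct user_mm = { 0 };
        struct task_struct tasks[] = {
                { "bash",        &user_mm },  /* user task */
                { "migration/0", NULL     },  /* kernel thread: skip */
                { "watchdog/0",  NULL     },  /* kernel thread: skip */
        };
        size_t i;

        for (i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
                if (!tasks[i].mm) {
                        /* mirrors "if (!p->mm) continue;" in the patch */
                        printf("%-12s: kernel thread, left alone\n",
                               tasks[i].comm);
                        continue;
                }
                printf("%-12s: user task, would be normalized\n",
                       tasks[i].comm);
        }
        return 0;
}

Built with a plain cc, it prints one line per task and leaves the two kernel threads untouched, just as SysRq-N now does.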