Commit 154abafc authored by Eric W. Biederman, committed by Ingo Molnar

tasks, sched/core: With a grace period after finish_task_switch(), remove unnecessary code

Remove workarounds that were written before there was a grace period
after tasks left the runqueue in finish_task_switch().

In particular, now that tasks exiting the runqueue experience an RCU
grace period, none of the work performed by task_rcu_dereference()
except the rcu_dereference() is necessary, so replace
task_rcu_dereference() with rcu_dereference().
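
For illustration, a minimal sketch of the call-site pattern this change
enables (mirroring the fair.c and membarrier.c hunks below; inspect() is
a hypothetical stand-in for whatever the caller does with the task):

	/*
	 * Before (sketch): the helper internally re-checked ->sighand
	 * to guard against the task_struct being freed and reused out
	 * from under us.
	 */
	rcu_read_lock();
	p = task_rcu_dereference(&cpu_rq(cpu)->curr);
	if (p)
		inspect(p);	/* hypothetical use of the task */
	rcu_read_unlock();

	/*
	 * After (sketch): a plain rcu_dereference() suffices, because a
	 * task leaving the runqueue is now guaranteed a full RCU grace
	 * period before its task_struct can be freed and reused.
	 */
	rcu_read_lock();
	p = rcu_dereference(cpu_rq(cpu)->curr);
	if (p)
		inspect(p);
	rcu_read_unlock();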

Remove the code in rcuwait_wait_event() that checks to ensure the
current task has not exited.  It is no longer necessary, as it is now
guaranteed that any running task will experience an RCU grace period
after it leaves the runqueue.
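
As background, a simplified sketch of the rcuwait pairing that relies
on this guarantee (condensed from rcuwait_wait_event() and
rcuwait_wake_up(); memory barriers, set_current_state() and the
condition re-check loop are omitted, and "condition" is a placeholder):

	/* Waiter: publish ourselves, then sleep until the condition holds. */
	rcu_assign_pointer(w->task, current);
	while (!condition)
		schedule();
	rcu_assign_pointer(w->task, NULL);

	/*
	 * Waker: runs entirely under rcu_read_lock(), so even if the
	 * waiter exits concurrently its task_struct cannot be freed
	 * before a grace period elapses; the waiter-side
	 * WARN_ON(current->exit_state) is therefore no longer needed.
	 */
	rcu_read_lock();
	task = rcu_dereference(w->task);
	if (task)
		wake_up_process(task);
	rcu_read_unlock();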

Remove the comment in rcuwait_wake_up() as it is no longer relevant.

Ref: 8f95c90c ("sched/wait, RCU: Introduce rcuwait machinery")
Ref: 150593bf ("sched/api: Introduce task_rcu_dereference() and try_get_task_struct()")
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/87lfurdpk9.fsf_-_@x220.int.ebiederm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0ff7b2cf
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -6,16 +6,11 @@
 
 /*
  * rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner; where it is forbidden to use
- * after exit_notify(). task_struct is not properly rcu protected,
- * unless dealing with rcu-aware lists, ie: find_task_by_*().
+ * task in an rcu-safe manner.
  *
- * Alternatively we have task_rcu_dereference(), but the return
- * semantics have different implications which would break the
- * wakeup side. The only time @task is non-nil is when a user is
- * blocked (or checking if it needs to) on a condition, and reset
- * as soon as we know that the condition has succeeded and are
- * awoken.
+ * The only time @task is non-nil is when a user is blocked (or
+ * checking if it needs to) on a condition, and reset as soon as we
+ * know that the condition has succeeded and are awoken.
  */
 struct rcuwait {
 	struct task_struct __rcu *task;
@@ -37,13 +32,6 @@ extern void rcuwait_wake_up(struct rcuwait *w);
  */
 #define rcuwait_wait_event(w, condition) \
 ({ \
-	/* \
-	 * Complain if we are called after do_exit()/exit_notify(), \
-	 * as we cannot rely on the rcu critical region for the \
-	 * wakeup side. \
-	 */ \
-	WARN_ON(current->exit_state); \
- \
 	rcu_assign_pointer((w)->task, current); \
 	for (;;) { \
 		/* \
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -119,7 +119,6 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
-struct task_struct *task_rcu_dereference(struct task_struct **ptask);
 void put_task_struct_rcu_user(struct task_struct *task);
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -234,69 +234,6 @@ void release_task(struct task_struct *p)
 		goto repeat;
 }
 
-/*
- * Note that if this function returns a valid task_struct pointer (!NULL)
- * task->usage must remain >0 for the duration of the RCU critical section.
- */
-struct task_struct *task_rcu_dereference(struct task_struct **ptask)
-{
-	struct sighand_struct *sighand;
-	struct task_struct *task;
-
-	/*
-	 * We need to verify that release_task() was not called and thus
-	 * delayed_put_task_struct() can't run and drop the last reference
-	 * before rcu_read_unlock(). We check task->sighand != NULL,
-	 * but we can read the already freed and reused memory.
-	 */
-retry:
-	task = rcu_dereference(*ptask);
-	if (!task)
-		return NULL;
-
-	probe_kernel_address(&task->sighand, sighand);
-
-	/*
-	 * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
-	 * was already freed we can not miss the preceding update of this
-	 * pointer.
-	 */
-	smp_rmb();
-	if (unlikely(task != READ_ONCE(*ptask)))
-		goto retry;
-
-	/*
-	 * We've re-checked that "task == *ptask", now we have two different
-	 * cases:
-	 *
-	 * 1. This is actually the same task/task_struct. In this case
-	 *    sighand != NULL tells us it is still alive.
-	 *
-	 * 2. This is another task which got the same memory for task_struct.
-	 *    We can't know this of course, and we can not trust
-	 *    sighand != NULL.
-	 *
-	 *    In this case we actually return a random value, but this is
-	 *    correct.
-	 *
-	 *    If we return NULL - we can pretend that we actually noticed that
-	 *    *ptask was updated when the previous task has exited. Or pretend
-	 *    that probe_slab_address(&sighand) reads NULL.
-	 *
-	 *    If we return the new task (because sighand is not NULL for any
-	 *    reason) - this is fine too. This (new) task can't go away before
-	 *    another gp pass.
-	 *
-	 *    And note: We could even eliminate the false positive if re-read
-	 *    task->sighand once again to avoid the falsely NULL. But this case
-	 *    is very unlikely so we don't care.
-	 */
-	if (!sighand)
-		return NULL;
-
-	return task;
-}
-
 void rcuwait_wake_up(struct rcuwait *w)
 {
 	struct task_struct *task;
@@ -316,10 +253,6 @@ void rcuwait_wake_up(struct rcuwait *w)
 	 */
 	smp_mb(); /* (B) */
 
-	/*
-	 * Avoid using task_rcu_dereference() magic as long as we are careful,
-	 * see comment in rcuwait_wait_event() regarding ->exit_state.
-	 */
 	task = rcu_dereference(w->task);
 	if (task)
 		wake_up_process(task);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1602,7 +1602,7 @@ static void task_numa_compare(struct task_numa_env *env,
 		return;
 
 	rcu_read_lock();
-	cur = task_rcu_dereference(&dst_rq->curr);
+	cur = rcu_dereference(dst_rq->curr);
 	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
 		cur = NULL;
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -71,7 +71,7 @@ static int membarrier_global_expedited(void)
 			continue;
 
 		rcu_read_lock();
-		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
+		p = rcu_dereference(cpu_rq(cpu)->curr);
 		if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
 				   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
 			if (!fallback)
@@ -150,7 +150,7 @@ static int membarrier_private_expedited(int flags)
 		if (cpu == raw_smp_processor_id())
 			continue;
 
 		rcu_read_lock();
-		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
+		p = rcu_dereference(cpu_rq(cpu)->curr);
 		if (p && p->mm == current->mm) {
 			if (!fallback)
 				__cpumask_set_cpu(cpu, tmpmask);