Commit c172e0a3 authored by Mathieu Desnoyers, committed by Ingo Molnar

sched/membarrier: Return -ENOMEM to userspace on memory allocation failure

Remove the IPI fallback code from membarrier that dealt with the very
infrequent case of cpumask memory allocation failure. Use GFP_KERNEL
rather than GFP_NOWAIT, and relax the blocking guarantees for the
expedited membarrier system call commands, allowing them to block if
waiting for memory to be made available.

In addition, -ENOMEM can now be returned to user-space if the cpumask
memory allocation fails.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-8-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c6d68c1c
kernel/sched/membarrier.c

@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 
 	if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
-		if (!fallback)
-			__cpumask_set_cpu(cpu, tmpmask);
-		else
-			smp_call_function_single(cpu, ipi_mb, NULL, 1);
+		__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
 
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
+
 	cpus_read_unlock();
 
 	/*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -195,20 +176,16 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == mm) {
-			if (!fallback)
-				__cpumask_set_cpu(cpu, tmpmask);
-			else
-				smp_call_function_single(cpu, ipi_mb, NULL, 1);
-		}
+		if (p && p->mm == mm)
+			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
 
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
+
 	cpus_read_unlock();
 
 	/*
@@ -264,7 +241,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;
 
-		p = rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
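For illustration only (not part of the commit): with this change, the expedited
membarrier commands gain a new user-visible failure mode, so callers should be
prepared to see -ENOMEM in addition to the existing error codes. A minimal
user-space sketch of handling it, assuming the standard <linux/membarrier.h>
UAPI constants and a raw syscall(2) wrapper (the sys_membarrier() helper name
below is just for the example), might look like this:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/membarrier.h>

	static int sys_membarrier(int cmd, unsigned int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		/* Private expedited commands require a one-time registration. */
		if (sys_membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) < 0) {
			perror("membarrier register");
			return 1;
		}

		if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) < 0) {
			if (errno == ENOMEM) {
				/*
				 * New with this commit: the kernel could not
				 * allocate the temporary cpumask. No barrier
				 * was issued; the caller may retry later.
				 */
				fprintf(stderr, "membarrier: out of memory\n");
			} else {
				perror("membarrier");
			}
			return 1;
		}
		return 0;
	}

The same consideration applies to MEMBARRIER_CMD_GLOBAL_EXPEDITED, since both
paths now allocate the cpumask with GFP_KERNEL and propagate the allocation
failure instead of falling back to per-CPU IPIs.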