Commit 73810a06 authored by Thomas Gleixner

crypto: n2 - Replace racy task affinity logic

spu_queue_register() needs to invoke setup functions on a particular
CPU. This is achieved by temporarily setting the affinity of the
calling user space thread to the requested CPU and resetting it to the original
affinity afterwards.

That's racy vs. CPU hotplug and concurrent affinity settings for that
thread, resulting in code executing on the wrong CPU and overwriting the
new affinity setting.

Replace it with work_on_cpu_safe(), which guarantees to run the code on
the requested CPU or to fail if the CPU is offline.
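
For reference, a minimal sketch of the work_on_cpu_safe() pattern (the names
my_setup_arg, my_setup_workfn() and n2_percpu_setup() are illustrative
placeholders, not part of this driver):

#include <linux/workqueue.h>

/* Hypothetical per-CPU setup routine standing in for the real HV calls. */
extern long n2_percpu_setup(unsigned long type);

struct my_setup_arg {
        unsigned long type;
};

/* Invoked by a workqueue worker that is bound to the requested CPU. */
static long my_setup_workfn(void *arg)
{
        struct my_setup_arg *a = arg;

        return n2_percpu_setup(a->type);
}

static long my_setup_on(int cpu, unsigned long type)
{
        struct my_setup_arg a = { .type = type };

        /* Runs the callback on @cpu, or fails if that CPU is offline. */
        return work_on_cpu_safe(cpu, my_setup_workfn, &a);
}

Because the callback runs synchronously on the target CPU via a CPU-bound
worker, the caller's own affinity mask is never modified, which is what
removes the race with hotplug and concurrent affinity changes.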
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: "David S. Miller" <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: linux-crypto@vger.kernel.org
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Tejun Heo <tj@kernel.org>
Cc: Len Brown <lenb@kernel.org>
Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131019420.2408@nanos
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 12699ac5
@@ -65,6 +65,11 @@ struct spu_queue {
 	struct list_head	list;
 };
 
+struct spu_qreg {
+	struct spu_queue	*queue;
+	unsigned long		type;
+};
+
 static struct spu_queue **cpu_to_cwq;
 static struct spu_queue **cpu_to_mau;
@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
 }
 
-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+static long spu_queue_register_workfn(void *arg)
 {
-	cpumask_var_t old_allowed;
+	struct spu_qreg *qr = arg;
+	struct spu_queue *p = qr->queue;
+	unsigned long q_type = qr->type;
 	unsigned long hv_ret;
 
-	if (cpumask_empty(&p->sharing))
-		return -EINVAL;
-
-	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(old_allowed, &current->cpus_allowed);
-
-	set_cpus_allowed_ptr(current, &p->sharing);
-
 	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
 				 CWQ_NUM_ENTRIES, &p->qhandle);
 	if (!hv_ret)
 		sun4v_ncs_sethead_marker(p->qhandle, 0);
 
-	set_cpus_allowed_ptr(current, old_allowed);
-
-	free_cpumask_var(old_allowed);
-
-	return (hv_ret ? -EINVAL : 0);
+	return hv_ret ? -EINVAL : 0;
+}
+
+static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
+{
+	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
+	struct spu_qreg qr = { .queue = p, .type = q_type };
+
+	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
 }
 
 static int spu_queue_setup(struct spu_queue *p)