Commit 9925cc13 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Just a couple of stragglers here:

   - fix an issue migrating interrupts on CPU hotplug
   - fix a potential information leak of TLS registers across an exec
     (Nathan has sent a corresponding patch for arch/arm/ to rmk)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: flush TLS registers during exec
  arm64: use irq_set_affinity with force=false when migrating irqs
parents 753a6cb7 eb35bdd7
arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}
 
-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
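For context on the hunk above: the fallback affinity is now applied only when the IRQ's affinity mask no longer contains any online CPU, and irq_set_affinity() is called with force=false so the core IRQ code validates the chosen target against cpu_online_mask instead of blindly honouring a mask that may still name the CPU going down. A minimal user-space analogue of the "no online CPU left" test, using plain bitmasks instead of struct cpumask (the masks and needs_retarget() are illustrative names, not kernel API):

/* User-space analogue of the affinity fallback above; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_retarget(unsigned long affinity, unsigned long online)
{
	/* Mirrors cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids,
	 * which holds when no online CPU remains in the affinity mask. */
	return (affinity & online) == 0;
}

int main(void)
{
	unsigned long online   = 0x0f;	/* CPUs 0-3 online */
	unsigned long affinity = 0x30;	/* IRQ bound to CPUs 4-5, both offline */

	if (needs_retarget(affinity, online)) {
		affinity = online;	/* fall back to all online CPUs */
		printf("retargeting IRQ to online mask %#lx\n", affinity);
	}
	return 0;
}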
arch/arm64/kernel/process.c
@@ -230,9 +230,27 @@ void exit_thread(void)
 {
 }
 
+static void tls_thread_flush(void)
+{
+	asm ("msr tpidr_el0, xzr");
+
+	if (is_compat_task()) {
+		current->thread.tp_value = 0;
+
+		/*
+		 * We need to ensure ordering between the shadow state and the
+		 * hardware state, so that we don't corrupt the hardware state
+		 * with a stale shadow state during context switch.
+		 */
+		barrier();
+		asm ("msr tpidrro_el0, xzr");
+	}
+}
+
 void flush_thread(void)
 {
 	fpsimd_flush_thread();
+	tls_thread_flush();
 	flush_ptrace_hw_breakpoint(current);
 }
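A note on the barrier() above: it is a compiler barrier, not a DSB/ISB. The msr asm declares no memory dependency, so without the barrier the compiler may sink the plain store to current->thread.tp_value past the hardware write; if the task is preempted in that window, the context-switch path can load the stale shadow value back into tpidrro_el0. In mainline, barrier() expands to an empty asm with a "memory" clobber. A stand-alone sketch of the idiom (compiler_barrier() and shadow_tp are local stand-ins, and the msr itself is only legal in kernel context):

/* Sketch of the ordering idiom; compiler_barrier() is a local stand-in
 * for the kernel's barrier(), which expands to exactly this empty asm
 * with a "memory" clobber.
 */
#define compiler_barrier()	__asm__ __volatile__("" : : : "memory")

unsigned long shadow_tp;	/* stand-in for current->thread.tp_value */

void tls_clear(void)
{
	shadow_tp = 0;		/* update the shadow state first... */
	compiler_barrier();	/* ...and pin it before the hardware write */
	__asm__ ("msr tpidrro_el0, xzr");	/* EL1 (kernel) context only */
}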
arch/arm64/kernel/sys_compat.c
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs)
 	case __ARM_NR_compat_set_tls:
 		current->thread.tp_value = regs->regs[0];
+
+		/*
+		 * Protect against register corruption from context switch.
+		 * See comment in tls_thread_flush.
+		 */
+		barrier();
 		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
 		return 0;
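The TLS leak the second patch closes is observable from user space, since tpidrro_el0 is readable (though not writable) at EL0. Before the fix, the register was simply left alone across exec, so a 64-bit binary exec'd by a 32-bit task that had called set_tls would read back the previous image's TLS pointer; on a fixed kernel it reads zero. (tpidr_el0 leaked too, but libc overwrites it early for its own TLS, so tpidrro_el0 is the easy one to observe.) A small probe, assuming an arm64 Linux target (nothing here is kernel API):

/* Read tpidrro_el0 in a freshly exec'd 64-bit task. On a kernel with
 * the fix above this prints 0; on an unfixed kernel it can print the
 * TLS pointer installed by the 32-bit image that exec'd us.
 */
#include <stdio.h>

int main(void)
{
	unsigned long tpidrro;

	__asm__ ("mrs %0, tpidrro_el0" : "=r" (tpidrro));
	printf("tpidrro_el0 = %#lx\n", tpidrro);
	return 0;
}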