Commit 6e7e7f4d authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Fix some more FP register fallout from the SVE patches and also some
  problems with the PGD tracking in our software PAN emulation code,
  after we received a crash report from a 3.18 kernel running a
  backport.

  Summary:

   - fix SW PAN pgd shadowing for kernel threads, EFI and exiting user
     tasks

   - fix FP register leak when a task_struct is re-allocated

   - fix potential use-after-free in FP state tracking used by KVM"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/sve: Avoid dereference of dead task_struct in KVM guest entry
  arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
  arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
  arm64: fpsimd: Abstract out binding of task's fpsimd context to the cpu.
  arm64: fpsimd: Prevent registers leaking from dead tasks
parents 3625de4b cb968afc
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -132,10 +132,8 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 			 * Defer the switch to the current thread's TTBR0_EL1
 			 * until uaccess_enable(). Restore the current
 			 * thread's saved ttbr0 corresponding to its active_mm
-			 * (if different from init_mm).
 			 */
 			cpu_set_reserved_ttbr0();
-			if (current->active_mm != &init_mm)
-				update_saved_ttbr0(current, current->active_mm);
+			update_saved_ttbr0(current, current->active_mm);
 		}
 	}
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -156,29 +156,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
-/*
- * This is called when "tsk" is about to enter lazy TLB mode.
- *
- * mm:  describes the currently active mm context
- * tsk: task which is entering lazy tlb
- * cpu: cpu number which is entering lazy tlb
- *
- * tsk->mm will be NULL
- */
-static inline void
-enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void update_saved_ttbr0(struct task_struct *tsk,
 				      struct mm_struct *mm)
 {
-	if (system_uses_ttbr0_pan()) {
-		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	}
+	u64 ttbr;
+
+	if (!system_uses_ttbr0_pan())
+		return;
+
+	if (mm == &init_mm)
+		ttbr = __pa_symbol(empty_zero_page);
+	else
+		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+	task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -187,6 +179,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 }
 #endif
 
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+	/*
+	 * We don't actually care about the ttbr0 mapping, so point it at the
+	 * zero page.
+	 */
+	update_saved_ttbr0(tsk, &init_mm);
+}
+
 static inline void __switch_mm(struct mm_struct *next)
 {
 	unsigned int cpu = smp_processor_id();
@@ -214,10 +216,8 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
 	 * value may have not been initialised yet (activate_mm caller) or the
 	 * ASID has changed since the last run (following the context switch
-	 * of another thread of the same process). Avoid setting the reserved
-	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 * of another thread of the same process).
 	 */
-	if (next != &init_mm)
-		update_saved_ttbr0(tsk, next);
+	update_saved_ttbr0(tsk, next);
 }
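The ttbr0 image built by update_saved_ttbr0() above packs two things into one register value: the physical address of the pgd in the low bits and the 16-bit ASID in bits 63:48, matching the TTBR0_EL1 layout. A minimal standalone C sketch of just that bit packing, with made-up example values (this models only the layout, not the kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the ttbr0 value: page-table base address in
 * the low bits, 16-bit ASID in bits 63:48. The pgd address and ASID
 * below are invented for the demonstration. */
static uint64_t pack_ttbr0(uint64_t pgd_phys, uint16_t asid)
{
	return pgd_phys | ((uint64_t)asid << 48);
}

int main(void)
{
	uint64_t ttbr = pack_ttbr0(0x40001000ULL, 0x2a);

	printf("ttbr0 = 0x%016" PRIx64 "\n", ttbr);
	printf("asid  = 0x%" PRIx64 "\n", ttbr >> 48);
	printf("baddr = 0x%" PRIx64 "\n", ttbr & ((1ULL << 48) - 1));
	return 0;
}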
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -114,7 +114,12 @@
  * returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
  * whatever is in the FPSIMD registers is not saved to memory, but discarded.
  */
-static DEFINE_PER_CPU(struct fpsimd_state *, fpsimd_last_state);
+struct fpsimd_last_state_struct {
+	struct fpsimd_state *st;
+	bool sve_in_use;
+};
+
+static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
 static int sve_default_vl = -1;
@@ -905,7 +910,7 @@ void fpsimd_thread_switch(struct task_struct *next)
 		 */
 		struct fpsimd_state *st = &next->thread.fpsimd_state;
 
-		if (__this_cpu_read(fpsimd_last_state) == st
+		if (__this_cpu_read(fpsimd_last_state.st) == st
 		    && st->cpu == smp_processor_id())
 			clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
 		else
@@ -991,6 +996,21 @@ void fpsimd_signal_preserve_current_state(void)
 	sve_to_fpsimd(current);
 }
 
+/*
+ * Associate current's FPSIMD context with this cpu
+ * Preemption must be disabled when calling this function.
+ */
+static void fpsimd_bind_to_cpu(void)
+{
+	struct fpsimd_last_state_struct *last =
+		this_cpu_ptr(&fpsimd_last_state);
+	struct fpsimd_state *st = &current->thread.fpsimd_state;
+
+	last->st = st;
+	last->sve_in_use = test_thread_flag(TIF_SVE);
+	st->cpu = smp_processor_id();
+}
+
 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1004,11 +1024,8 @@ void fpsimd_restore_current_state(void)
 	local_bh_disable();
 
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		struct fpsimd_state *st = &current->thread.fpsimd_state;
-
 		task_fpsimd_load();
-		__this_cpu_write(fpsimd_last_state, st);
-		st->cpu = smp_processor_id();
+		fpsimd_bind_to_cpu();
 	}
 
 	local_bh_enable();
@@ -1032,12 +1049,8 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
 	task_fpsimd_load();
 
-	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		struct fpsimd_state *st = &current->thread.fpsimd_state;
-
-		__this_cpu_write(fpsimd_last_state, st);
-		st->cpu = smp_processor_id();
-	}
+	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
+		fpsimd_bind_to_cpu();
 
 	local_bh_enable();
 }
@@ -1052,7 +1065,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
 static inline void fpsimd_flush_cpu_state(void)
 {
-	__this_cpu_write(fpsimd_last_state, NULL);
+	__this_cpu_write(fpsimd_last_state.st, NULL);
 }
 
 /*
@@ -1065,14 +1078,10 @@ static inline void fpsimd_flush_cpu_state(void)
 #ifdef CONFIG_ARM64_SVE
 void sve_flush_cpu_state(void)
 {
-	struct fpsimd_state *const fpstate = __this_cpu_read(fpsimd_last_state);
-	struct task_struct *tsk;
-
-	if (!fpstate)
-		return;
-
-	tsk = container_of(fpstate, struct task_struct, thread.fpsimd_state);
-	if (test_tsk_thread_flag(tsk, TIF_SVE))
+	struct fpsimd_last_state_struct const *last =
+		this_cpu_ptr(&fpsimd_last_state);
+
+	if (last->st && last->sve_in_use)
 		fpsimd_flush_cpu_state();
 }
 #endif /* CONFIG_ARM64_SVE */
@@ -1267,7 +1276,7 @@ static inline void fpsimd_pm_init(void) { }
 #ifdef CONFIG_HOTPLUG_CPU
 static int fpsimd_cpu_dead(unsigned int cpu)
 {
-	per_cpu(fpsimd_last_state, cpu) = NULL;
+	per_cpu(fpsimd_last_state.st, cpu) = NULL;
 	return 0;
 }
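The sve_flush_cpu_state() hunk above is the use-after-free fix named in the merge message: the old code used container_of() to walk from the per-cpu fpsimd_state pointer back to its owning task_struct and then read that task's TIF_SVE flag, but the task may already have exited and been freed. The rework snapshots the flag into the per-cpu structure at bind time, so the flush path never touches the task at all. A small sketch of the two shapes of the check, using hypothetical stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the structures involved. */
struct fp_state {
	bool tif_sve;              /* flag owned by the task */
};

struct last_state {
	struct fp_state *st;       /* may point into a freed task */
	bool sve_in_use;           /* copied out while the task was alive */
};

/* Old shape: dereference the saved pointer to read the task's flag
 * now -- a use-after-free if the task has since died. */
static bool sve_live_unsafe(const struct last_state *last)
{
	return last->st && last->st->tif_sve;
}

/* New shape: decide from data snapshotted at bind time; the pointer
 * is only tested against NULL, never dereferenced. */
static bool sve_live_safe(const struct last_state *last)
{
	return last->st && last->sve_in_use;
}

int main(void)
{
	struct fp_state fp = { .tif_sve = true };
	struct last_state last = { .st = &fp, .sve_in_use = true };

	printf("unsafe: %d, safe: %d\n",
	       sve_live_unsafe(&last), sve_live_safe(&last));
	return 0;
}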
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -314,6 +314,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	clear_tsk_thread_flag(p, TIF_SVE);
 	p->thread.sve_state = NULL;
 
+	/*
+	 * In case p was allocated the same task_struct pointer as some
+	 * other recently-exited task, make sure p is disassociated from
+	 * any cpu that may have run that now-exited task recently.
+	 * Otherwise we could erroneously skip reloading the FPSIMD
+	 * registers for p.
+	 */
+	fpsimd_flush_task_state(p);
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->regs[0] = 0;
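The fpsimd_flush_task_state() call added to copy_thread() closes the remaining hole in the per-cpu tracking: skipping the FPSIMD reload is only safe when the cpu-to-task and task-to-cpu links both still refer to each other, and a task_struct recycled at the same address can satisfy both checks by accident, leaking the dead task's register contents into the new task. A self-contained userspace model of that hazard; all names here are illustrative, and the kernel's real check lives in fpsimd_thread_switch():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task {
	int last_cpu;              /* cpu that last loaded this task's state */
	const char *name;
};

struct cpu {
	int id;
	struct task *owner;        /* task whose state is live in registers */
};

/* The reload may be skipped only when BOTH halves of the handshake
 * still agree, mirroring the fpsimd_last_state/st->cpu check. */
static bool can_skip_reload(const struct cpu *c, const struct task *t)
{
	return c->owner == t && t->last_cpu == c->id;
}

static void load_state(struct cpu *c, struct task *t)
{
	c->owner = t;              /* bind cpu -> task ... */
	t->last_cpu = c->id;       /* ... and task -> cpu */
}

/* What fpsimd_flush_task_state() does: break the task -> cpu half so
 * a recycled allocation can never pass the two-way check. */
static void flush_task_state(struct task *t)
{
	t->last_cpu = -1;
}

int main(void)
{
	struct cpu cpu0 = { .id = 0, .owner = NULL };
	struct task a = { .last_cpu = -1, .name = "A" };

	load_state(&cpu0, &a);
	printf("skip for A: %d\n", can_skip_reload(&cpu0, &a));

	/* Task A dies; a new task is allocated at the same address.
	 * Without a flush, the stale handshake still matches. */
	struct task *b = &a;
	b->name = "B";
	printf("skip for B, no flush: %d (bug)\n", can_skip_reload(&cpu0, b));

	flush_task_state(b);       /* as copy_thread() now does */
	printf("skip for B, flushed: %d (fixed)\n", can_skip_reload(&cpu0, b));
	return 0;
}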