Commit 0adbdfde authored by Will Deacon

arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm

update_saved_ttbr0 mandates that mm->pgd is not swapper, since swapper
contains kernel mappings and should never be installed into ttbr0. However,
this means that callers must avoid passing the init_mm to update_saved_ttbr0,
which in turn can leave the saved ttbr0 value out-of-date in the context of
the idle thread. For example, EFI runtime services may leave the saved ttbr0
pointing at the EFI page table, and kernel threads may end up with stale
references to freed page tables.
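
To see why a stale value matters: with CONFIG_ARM64_SW_TTBR0_PAN, the saved
ttbr0 is written straight back into TTBR0_EL1 whenever userspace access is
re-enabled. A simplified sketch of that restore path, paraphrased from the C
variant of __uaccess_ttbr0_enable in this era's
arch/arm64/include/asm/uaccess.h (details may differ slightly):

        static inline void __uaccess_ttbr0_enable(void)
        {
                unsigned long flags;

                /*
                 * Mask interrupts so that a context switch cannot update
                 * thread_info->ttbr0 between the read and the MSR below.
                 */
                local_irq_save(flags);
                /*
                 * Whatever was saved is installed verbatim; a stale
                 * pointer to a freed or EFI page table goes live here.
                 */
                write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
                isb();
                local_irq_restore(flags);
        }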

This patch changes update_saved_ttbr0 so that, for the init_mm, the saved
ttbr0 value points at the empty zero page, which always exists and never
contains valid translations. efi_set_pgd and switch_mm can then call
update_saved_ttbr0 unconditionally.
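
For readability, here is the post-patch helper condensed from the
mmu_context.h hunk below; the comments explaining why each branch is safe are
added here and are not part of the patch itself:

        static inline void update_saved_ttbr0(struct task_struct *tsk,
                                              struct mm_struct *mm)
        {
                u64 ttbr;

                if (!system_uses_ttbr0_pan())
                        return;

                if (mm == &init_mm)
                        /*
                         * A page of zeroes: every descriptor in it is
                         * invalid, so any TTBR0 walk rooted here faults
                         * instead of reaching stale user or EFI mappings.
                         */
                        ttbr = __pa_symbol(empty_zero_page);
                else
                        /* pgd physical address, ASID in bits [63:48]. */
                        ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

                task_thread_info(tsk)->ttbr0 = ttbr;
        }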

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Cc: <stable@vger.kernel.org>
Fixes: 39bc88e5 ("arm64: Disable TTBR0_EL1 during normal kernel execution")
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Reported-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 8884b7bd
arch/arm64/include/asm/efi.h
@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
 			 * Defer the switch to the current thread's TTBR0_EL1
 			 * until uaccess_enable(). Restore the current
 			 * thread's saved ttbr0 corresponding to its active_mm
-			 * (if different from init_mm).
 			 */
 			cpu_set_reserved_ttbr0();
-			if (current->active_mm != &init_mm)
-				update_saved_ttbr0(current, current->active_mm);
+			update_saved_ttbr0(current, current->active_mm);
 		}
 	}
 }
arch/arm64/include/asm/mmu_context.h
@@ -174,11 +174,17 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void update_saved_ttbr0(struct task_struct *tsk,
 				      struct mm_struct *mm)
 {
-	if (system_uses_ttbr0_pan()) {
-		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	}
+	u64 ttbr;
+
+	if (!system_uses_ttbr0_pan())
+		return;
+
+	if (mm == &init_mm)
+		ttbr = __pa_symbol(empty_zero_page);
+	else
+		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+
+	task_thread_info(tsk)->ttbr0 = ttbr;
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -214,11 +220,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
 	 * value may have not been initialised yet (activate_mm caller) or the
 	 * ASID has changed since the last run (following the context switch
-	 * of another thread of the same process). Avoid setting the reserved
-	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 * of another thread of the same process).
 	 */
-	if (next != &init_mm)
-		update_saved_ttbr0(tsk, next);
+	update_saved_ttbr0(tsk, next);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)