Commit 356607f2 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, arm64: fix up fault handling logic

Right now the arm64 fault handling code removes pointer tags from addresses
covered by TTBR0 in faults taken from both EL0 and EL1, but doesn't do that
for addresses covered by TTBR1.

This patch adds two helper functions, is_ttbr0_addr() and is_ttbr1_addr(),
where the latter accounts for the fact that TTBR1 pointers might be tagged
when tag-based KASAN is in use, and uses them to perform the pointer checks
in arch/arm64/mm/fault.c.
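
For illustration, a minimal user-space sketch of why is_ttbr1_addr() resets the
tag before comparing: with tag-based KASAN the pointer tag is assumed to live in
the top byte of the address, so a tagged kernel pointer no longer compares
greater than or equal to VA_START until that byte is put back to 0xff. The
VA_START value, the sample address and the 0x3a tag below are illustrative, and
reset_tag() only sketches the effect of arch_kasan_reset_tag():

	#include <stdio.h>

	#define VA_START	0xffff000000000000UL	/* illustrative kernel VA base */
	#define TAG_SHIFT	56			/* tag assumed to sit in bits 63:56 */

	/* sketch: put the top byte back to the canonical 0xff */
	static unsigned long reset_tag(unsigned long addr)
	{
		return addr | (0xffUL << TAG_SHIFT);
	}

	int main(void)
	{
		unsigned long addr = 0xffff000012345678UL;	/* hypothetical TTBR1 address */
		/* replace the top byte with the hypothetical tag 0x3a */
		unsigned long tagged = (addr & ~(0xffUL << TAG_SHIFT)) | (0x3aUL << TAG_SHIFT);

		printf("tagged     >= VA_START: %d\n", tagged >= VA_START);		/* 0 */
		printf("reset(tag) >= VA_START: %d\n", reset_tag(tagged) >= VA_START);	/* 1 */
		return 0;
	}

The same reasoning is why is_ttbr0_addr() can keep using a plain comparison
against TASK_SIZE: the entry assembly has already cleared tags for TTBR0
addresses.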

Link: http://lkml.kernel.org/r/3f349b0e9e48b5df3298a6b4ae0634332274494a.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4d176711
arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/kasan.h>
 #include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr)
 		data_abort_decode(esr);
 }
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+	/* entry assembly clears tags for TTBR0 addrs */
+	return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+	return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
 /*
  * Dump out the page tables associated with 'addr' in the currently active mm.
  */
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr)
 	pgd_t *pgdp;
 	pgd_t pgd;
 
-	if (addr < TASK_SIZE) {
+	if (is_ttbr0_addr(addr)) {
 		/* TTBR0 */
 		mm = current->active_mm;
 		if (mm == &init_mm) {
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr)
				 addr);
 			return;
 		}
-	} else if (addr >= VA_START) {
+	} else if (is_ttbr1_addr(addr)) {
 		/* TTBR1 */
 		mm = &init_mm;
 	} else {
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
 	if (fsc_type == ESR_ELx_FSC_PERM)
 		return true;
 
-	if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
 		return fsc_type == ESR_ELx_FSC_FAULT &&
 			(regs->pstate & PSR_PAN_BIT);
 
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
 	 * type", so we ignore this wrinkle and just return the translation
 	 * fault.)
 	 */
-	if (current->thread.fault_address >= TASK_SIZE) {
+	if (!is_ttbr0_addr(current->thread.fault_address)) {
 		switch (ESR_ELx_EC(esr)) {
 		case ESR_ELx_EC_DABT_LOW:
 			/*
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
+	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
					   unsigned int esr,
					   struct pt_regs *regs)
 {
-	if (addr < TASK_SIZE)
+	if (is_ttbr0_addr(addr))
 		return do_page_fault(addr, esr, regs);
 
 	do_bad_area(addr, esr, regs);
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
 	 * re-enabled IRQs. If the address is a kernel address, apply
 	 * BP hardening prior to enabling IRQs and pre-emption.
 	 */
-	if (addr > TASK_SIZE)
+	if (!is_ttbr0_addr(addr))
 		arm64_apply_bp_hardening();
 
 	local_daif_restore(DAIF_PROCCTX);
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
				      struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
-		if (instruction_pointer(regs) > TASK_SIZE)
+		if (!is_ttbr0_addr(instruction_pointer(regs)))
 			arm64_apply_bp_hardening();
 		local_daif_restore(DAIF_PROCCTX);
 	}
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 	if (interrupts_enabled(regs))
 		trace_hardirqs_off();
 
-	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+	if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
 		arm64_apply_bp_hardening();
 
 	if (!inf->fn(addr, esr, regs)) {