Commit c947948f authored by Will Deacon

Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  arm64: booting: Require placement within 48-bit addressable memory
  arm64: mm: kfence: only handle translation faults
  arm64/mm: Simplify and document pte_to_phys() for 52 bit addresses
parents 37f5d61a 453dfcee
@@ -121,8 +121,9 @@ Header notes:
 			to the base of DRAM, since memory below it is not
 			accessible via the linear mapping
 		1
-			2MB aligned base may be anywhere in physical
-			memory
+			2MB aligned base such that all image_size bytes
+			counted from the start of the image are within
+			the 48-bit addressable range of physical memory
   Bits 4-63	Reserved.
   ============= ===============================================================
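For illustration only, a minimal loader-side sketch of the new placement rule (assuming this row describes flags bit 3 == 1); image_placement_ok() and its constants are hypothetical and not part of the patch:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical loader check: with flags bit 3 set, the 2MB aligned base must
 * be chosen so that every one of image_size bytes counted from the start of
 * the image lies below the 48-bit physical address limit. */
static bool image_placement_ok(uint64_t base, uint64_t image_size)
{
	const uint64_t pa_limit = 1ULL << 48;

	if (base & ((2ULL << 20) - 1))		/* base must be 2MB aligned */
		return false;
	if (base >= pa_limit)
		return false;
	return image_size <= pa_limit - base;	/* no byte at or above 2^48 */
}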
@@ -660,12 +660,10 @@ alternative_endif
 	.endm

 	.macro	pte_to_phys, phys, pte
-#ifdef CONFIG_ARM64_PA_BITS_52
-	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
-	bfxil	\phys, \pte, #16, #32
-	lsl	\phys, \phys, #16
-#else
 	and	\phys, \pte, #PTE_ADDR_MASK
+#ifdef CONFIG_ARM64_PA_BITS_52
+	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
+	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
 #endif
 	.endm
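As a reading aid, a standalone C sketch of the arithmetic the rewritten macro performs; the constants assume 64KB pages with 52-bit physical addresses and are illustrative stand-ins, not copies of the kernel headers:

#include <stdint.h>

/* Illustrative layout: 64KB pages, 52-bit PAs, PA[51:48] stored in PTE[15:12]. */
#define PAGE_SHIFT		16
#define PHYS_MASK_SHIFT		52
#define PTE_ADDR_LOW		(((1ULL << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#define PTE_ADDR_HIGH		(0xfULL << 12)
#define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
#define PTE_ADDR_HIGH_SHIFT	36
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Same three steps as the new macro: mask out the address bits, fold the
 * high bits from PTE[15:12] up to PA[51:48], then clamp to the PA range. */
static inline uint64_t pte_to_phys(uint64_t pte)
{
	uint64_t phys = pte & PTE_ADDR_MASK;

	phys |= phys << PTE_ADDR_HIGH_SHIFT;
	return phys & GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT);
}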
@@ -159,6 +159,7 @@
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define PTE_ADDR_HIGH		(_AT(pteval_t, 0xf) << 12)
 #define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#define PTE_ADDR_HIGH_SHIFT	36
 #else
 #define PTE_ADDR_MASK		PTE_ADDR_LOW
 #endif
@@ -77,11 +77,11 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 static inline phys_addr_t __pte_to_phys(pte_t pte)
 {
 	return (pte_val(pte) & PTE_ADDR_LOW) |
-		((pte_val(pte) & PTE_ADDR_HIGH) << 36);
+		((pte_val(pte) & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);
 }
 static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 {
-	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
+	return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
 }
 #else
 #define __pte_to_phys(pte)	(pte_val(pte) & PTE_ADDR_MASK)
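A quick round trip through the two helpers as a self-contained sketch; PTE_ADDR_LOW and PTE_ADDR_HIGH are hard-coded here for the 64KB-page, 52-bit PA layout rather than taken from pgtable-hwdef.h:

#include <assert.h>
#include <stdint.h>

#define PTE_ADDR_HIGH_SHIFT	36
#define PTE_ADDR_LOW		0x0000ffffffff0000ULL	/* PA[47:16] */
#define PTE_ADDR_HIGH		0x000000000000f000ULL	/* PA[51:48] in PTE[15:12] */
#define PTE_ADDR_MASK		(PTE_ADDR_LOW | PTE_ADDR_HIGH)

int main(void)
{
	uint64_t phys = 0x000abcd012340000ULL;	/* a 52-bit, 64KB-aligned PA */
	uint64_t pte  = (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PTE_ADDR_MASK;
	uint64_t back = (pte & PTE_ADDR_LOW) |
			((pte & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT);

	assert(back == phys);			/* PA[51:16] survives the round trip */
	return 0;
}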
@@ -354,6 +354,11 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
 	return false;
 }

+static bool is_translation_fault(unsigned long esr)
+{
+	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
+}
+
 static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 			      struct pt_regs *regs)
 {
@@ -386,7 +391,8 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
 	} else if (addr < PAGE_SIZE) {
 		msg = "NULL pointer dereference";
 	} else {
-		if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
+		if (is_translation_fault(esr) &&
+		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
 			return;

 		msg = "paging request";
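For context on why kfence is now gated on the fault type, a small sketch of the check in isolation; the ESR_ELx_FSC_* values below are stand-ins quoted from memory of asm/esr.h and should be treated as assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Assumed encodings: the low bits of ESR_ELx carry the fault status code, and
 * masking off the level bits leaves 0x04 for a translation fault at any level. */
#define ESR_ELx_FSC_TYPE	0x3cUL
#define ESR_ELx_FSC_FAULT	0x04UL

static bool is_translation_fault(unsigned long esr)
{
	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

int main(void)
{
	/* 0x05: level 1 translation fault -> kfence_handle_page_fault() runs */
	/* 0x0d: level 1 permission fault  -> the kfence handler is skipped   */
	printf("0x05 -> %d\n", is_translation_fault(0x05));
	printf("0x0d -> %d\n", is_translation_fault(0x0d));
	return 0;
}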