Commit c2c94023 authored by Will Deacon

Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  arm64/mm: use lm_alias() with addresses passed to memblock_free()
  mm: arm64: document why pte is not advanced in contpte_ptep_set_access_flags()
  arm64: Expose the end of the linear map in PHYSMEM_END
  arm64: trans_pgd: mark PTE entries as valid to avoid dead kexec()
  arm64/mm: Delete __init region from memblock.reserved
parents f661eb5f c02e7c5c
@@ -110,6 +110,8 @@
 #define PAGE_END	(_PAGE_END(VA_BITS_MIN))
 #endif /* CONFIG_KASAN */
 
+#define PHYSMEM_END	__pa(PAGE_END - 1)
+
 #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
 /*
...
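This override lets core mm code take the end of the addressable physical range from the architecture. As a rough sketch of the consuming side, assuming the usual #ifndef fallback pattern (the exact generic definition lives in core headers and may differ):

/*
 * Sketch of the generic fallback pattern: honour an arch-provided
 * PHYSMEM_END, otherwise derive the last addressable physical byte
 * from MAX_PHYSMEM_BITS. Illustrative only.
 */
#ifndef PHYSMEM_END
#define PHYSMEM_END	(((phys_addr_t)1 << MAX_PHYSMEM_BITS) - 1)
#endif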
@@ -421,6 +421,12 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
 	ptep = contpte_align_down(ptep);
 	start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);
 
+	/*
+	 * We are not advancing entry because __ptep_set_access_flags()
+	 * only consumes access flags from entry. And since we have checked
+	 * for the whole contpte block and returned early, pte_same()
+	 * within __ptep_set_access_flags() is likely false.
+	 */
 	for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE)
 		__ptep_set_access_flags(vma, addr, ptep, entry, 0);
...
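The loop in this hunk touches every PTE in a contpte block, so the ALIGN_DOWN() arithmetic is what anchors it to the block start. A standalone sketch of that arithmetic, assuming the arm64 4K-granule geometry (16 contiguous PTEs, 64 KiB blocks); ALIGN_DOWN here is the power-of-two mask form, not the kernel macro itself:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define CONT_PTES	16UL			/* arm64, 4K granule */
#define CONT_PTE_SIZE	(CONT_PTES * PAGE_SIZE)	/* 64 KiB */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x412345678000UL;	/* hypothetical faulting address */
	unsigned long start_addr = ALIGN_DOWN(addr, CONT_PTE_SIZE);

	/* Walk the whole block, as the for loop in the hunk does. */
	for (unsigned long i = 0; i < CONT_PTES; i++)
		printf("entry %2lu -> va 0x%lx\n", i, start_addr + i * PAGE_SIZE);
	return 0;
}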
@@ -414,8 +414,16 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	free_reserved_area(lm_alias(__init_begin),
-			   lm_alias(__init_end),
+	void *lm_init_begin = lm_alias(__init_begin);
+	void *lm_init_end = lm_alias(__init_end);
+
+	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_begin, PAGE_SIZE));
+	WARN_ON(!IS_ALIGNED((unsigned long)lm_init_end, PAGE_SIZE));
+
+	/* Delete __init region from memblock.reserved. */
+	memblock_free(lm_init_begin, lm_init_end - lm_init_begin);
+
+	free_reserved_area(lm_init_begin, lm_init_end,
 			   POISON_FREE_INITMEM, "unused kernel");
 
 	/*
...
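The new WARN_ON()s guard the assumption that both linear-map addresses are page-aligned before the byte range between them is handed back to memblock. A standalone sketch of the check, mirroring the kernel's power-of-two IS_ALIGNED() semantics (the addresses are made up for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	/* Hypothetical lm_alias(__init_begin) / lm_alias(__init_end) values. */
	unsigned long lm_init_begin = 0xffff000081000000UL;
	unsigned long lm_init_end   = 0xffff0000812a0000UL;

	printf("begin aligned: %d\n", (int)IS_ALIGNED(lm_init_begin, PAGE_SIZE));
	printf("end aligned:   %d\n", (int)IS_ALIGNED(lm_init_end, PAGE_SIZE));
	printf("bytes freed back to memblock: %lu\n", lm_init_end - lm_init_begin);
	return 0;
}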
@@ -42,14 +42,16 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 		 * the temporary mappings we use during restore.
 		 */
 		__set_pte(dst_ptep, pte_mkwrite_novma(pte));
-	} else if ((debug_pagealloc_enabled() ||
-		    is_kfence_address((void *)addr)) && !pte_none(pte)) {
+	} else if (!pte_none(pte)) {
 		/*
 		 * debug_pagealloc will remove the PTE_VALID bit if
 		 * the page isn't in use by the resume kernel. It may have
 		 * been in use by the original kernel, in which case we need
 		 * to put it back in our copy to do the restore.
 		 *
+		 * Other cases include kfence / vmalloc / memfd_secret which
+		 * may call `set_direct_map_invalid_noflush()`.
+		 *
 		 * Before marking this entry valid, check the pfn should
 		 * be mapped.
 		 */
...
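On arm64 a descriptor is invalid when bit 0 is clear, so putting an entry back for the restore path boils down to setting that bit in the copy. A standalone sketch of that operation (PTE_VALID and the helper name are written out here for illustration; the kernel uses its own pte helpers):

#include <inttypes.h>
#include <stdio.h>

#define PTE_VALID	(UINT64_C(1) << 0)	/* bit 0 of an arm64 descriptor */

/* Illustrative stand-in for the kernel's "make this pte valid" helper. */
static uint64_t pte_mkvalid(uint64_t pte)
{
	return pte | PTE_VALID;
}

int main(void)
{
	uint64_t pte = UINT64_C(0x00680000412ab000);	/* hypothetical invalidated PTE */

	printf("before: 0x%016" PRIx64 " (valid=%d)\n", pte, (int)(pte & PTE_VALID));
	pte = pte_mkvalid(pte);
	printf("after:  0x%016" PRIx64 " (valid=%d)\n", pte, (int)(pte & PTE_VALID));
	return 0;
}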