Commit 9d854607 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull second set of arm64 updates from Catalin Marinas:

 - KASLR bug fixes: use callee-saved register, boot-time I-cache
   maintenance

 - inv_entry asm macro fix (EL0 check typo)

 - pr_notice("Virtual kernel memory layout...") splitting

 - Clean-ups: use p?d_set_huge consistently, allow preemption around
   copy_to_user_page, remove unused __local_flush_icache_all()

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mm: allow preemption in copy_to_user_page
  arm64: consistently use p?d_set_huge
  arm64: kaslr: use callee saved register to preserve SCTLR across C call
  arm64: Split pr_notice("Virtual kernel memory layout...") into multiple pr_cont()
  arm64: drop unused __local_flush_icache_all()
  arm64: fix KASLR boot-time I-cache maintenance
  arm64/kernel: fix incorrect EL0 check in inv_entry macro
parents 8a20a04b 691b1e2e
@@ -116,13 +116,6 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __local_flush_icache_all(void)
-{
-	asm("ic iallu");
-	dsb(nsh);
-	isb();
-}
-
 static inline void __flush_icache_all(void)
 {
 	asm("ic ialluis");
...
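The helper removed above was the CPU-local flavour of the I-cache flush and had lost its last caller. For contrast, a sketch of the broadcast flavour that remains in this header (the barrier lines sit just past the hunk, so treat the body as illustrative):

	static inline void __flush_icache_all(void)
	{
		asm("ic ialluis");	/* invalidate I-cache on every CPU in the
					 * inner-shareable domain */
		dsb(ish);		/* inner-shareable barrier completes it */
		isb();
	}

The removed variant differed only in scope: "ic iallu" touches the local CPU alone and therefore pairs with dsb(nsh) rather than dsb(ish).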
@@ -277,7 +277,7 @@ END(vectors)
  * Invalid mode handlers
  */
 	.macro	inv_entry, el, reason, regsize = 64
-	kernel_entry el, \regsize
+	kernel_entry \el, \regsize
 	mov	x0, sp
 	mov	x1, #\reason
 	mrs	x2, esr_el1
...
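The one-character fix above matters because GNU as substitutes a macro parameter only where it is written with a backslash; a bare "el" is just an ordinary assembler token. inv_entry therefore passed the literal token "el", not the caller's 0 or 1, down to kernel_entry, so the EL0 check there misfired (the "EL0 check typo" from the merge summary). A stand-alone illustration of the substitution rule, written as a C file with file-scope asm (hypothetical demo, builds with aarch64-linux-gnu-gcc -c):

	/* Illustrative only: emits marker words, not real vector code. */
	asm(
	"	.macro	tag_el, el\n"
	"	.if	\\el == 0\n"		/* "\el" expands to the argument */
	"	.word	0xdead0e10\n"		/* chosen when the caller passed 0 */
	"	.else\n"
	"	.word	0xdead0e11\n"		/* chosen when the caller passed 1 */
	"	.endif\n"
	"	.endm\n"
	"	tag_el	0\n"
	"	tag_el	1\n"
	);

With the backslash in place each invocation selects the branch its argument names; written without it, the .if would compare an unrelated symbol instead of the argument.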
@@ -758,7 +758,7 @@ ENTRY(__early_cpu_boot_status)
  */
 	.section	".idmap.text", "ax"
 __enable_mmu:
-	mrs	x18, sctlr_el1		// preserve old SCTLR_EL1 value
+	mrs	x22, sctlr_el1		// preserve old SCTLR_EL1 value
 	mrs	x1, ID_AA64MMFR0_EL1
 	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
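The register rename is driven by the AAPCS64 procedure-call standard: x0-x17, and x18 (the platform register), may be clobbered by any C function the assembly calls into, while x19-x28 must be preserved by the callee. __enable_mmu keeps the old SCTLR_EL1 value live across "bl __create_page_tables" (see the next hunk), so it needs a callee-saved register; hence x18 becomes x22. A stand-alone sketch of the same pattern (hypothetical names; SIMD clobbers elided for brevity):

	unsigned long external_c_helper(void);	/* hypothetical callee */

	unsigned long park_across_call(void)
	{
		unsigned long parked;

		asm volatile("mov	x22, #42\n\t"	/* callee-saved: survives */
			     "bl	external_c_helper\n\t"
			     "mov	%0, x22"
			     : "=r" (parked)
			     :
			     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
			       "x8", "x9", "x10", "x11", "x12", "x13", "x14",
			       "x15", "x16", "x17", "x18", "x22", "x30",
			       "memory", "cc");
		return parked;
	}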
@@ -786,14 +786,15 @@ __enable_mmu:
 	 * to take into account by discarding the current kernel mapping and
 	 * creating a new one.
 	 */
-	msr	sctlr_el1, x18		// disable the MMU
+	msr	sctlr_el1, x22		// disable the MMU
 	isb
 	bl	__create_page_tables	// recreate kernel mapping
 
 	msr	sctlr_el1, x19		// re-enable the MMU
 	isb
-	ic	ialluis			// flush instructions fetched
-	isb				// via old mapping
+	ic	iallu			// flush instructions fetched
+	dsb	nsh			// via old mapping
+	isb
 	add	x27, x27, x23		// relocated __mmap_switched
 #endif
 	br	x27
...
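The second change in this hunk is the I-cache maintenance itself. The old "ic ialluis; isb" lacked a DSB, so the invalidation was not guaranteed to have completed before instructions were refetched, and broadcasting to the inner-shareable domain is unnecessary here since only the boot CPU is running. The corrected local sequence, sketched as a C helper (hypothetical name):

	static inline void local_icache_inval_all(void)
	{
		asm volatile("ic	iallu\n\t"	/* invalidate all I-cache, local CPU */
			     "dsb	nsh\n\t"	/* wait for the invalidation to complete */
			     "isb"			/* then resynchronise instruction fetch */
			     : : : "memory");
	}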
@@ -58,17 +58,13 @@ static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  * Copy user data from/to a page which is mapped into a different processes
  * address space. Really, we want to allow our "user space" model to handle
  * this.
- *
- * Note that this code needs to run on the current CPU.
  */
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	preempt_disable();
 	memcpy(dst, src, len);
 	flush_ptrace_access(vma, page, uaddr, dst, len);
-	preempt_enable();
 }
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
...
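Dropping the preempt_disable()/preempt_enable() pair is safe because the maintenance that flush_ptrace_access() ultimately issues operates by virtual address and, on ARMv8, is broadcast to all CPUs in the inner-shareable domain; it therefore does not matter if the task migrates between the memcpy() and the flush. A sketch of that style of by-VA maintenance (not the kernel's implementation; a fixed 64-byte line is assumed, where the kernel derives the real stride from CTR_EL0):

	static void sync_icache_dcache_range(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		for (addr = start & ~63UL; addr < end; addr += 64)
			asm volatile("dc cvau, %0" : : "r" (addr));	/* clean D-cache to PoU */
		asm volatile("dsb ish" : : : "memory");			/* complete before the ic */
		for (addr = start & ~63UL; addr < end; addr += 64)
			asm volatile("ic ivau, %0" : : "r" (addr));	/* invalidate I-cache lines */
		asm volatile("dsb ish\n\tisb" : : : "memory");		/* complete, then resync */
	}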
@@ -362,40 +362,36 @@ void __init mem_init(void)
 #define MLG(b, t) b, t, ((t) - (b)) >> 30
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
-	pr_notice("Virtual kernel memory layout:\n"
+	pr_notice("Virtual kernel memory layout:\n");
 #ifdef CONFIG_KASAN
-		  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+	pr_cont("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
 #endif
-		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
-		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+	pr_cont("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+		MLM(MODULES_VADDR, MODULES_END));
+	pr_cont("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
+		MLG(VMALLOC_START, VMALLOC_END));
+	pr_cont("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 		  "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
-		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
-#endif
-		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
-		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
-#ifdef CONFIG_KASAN
-		  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
-#endif
-		  MLM(MODULES_VADDR, MODULES_END),
-		  MLG(VMALLOC_START, VMALLOC_END),
+		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
 		  MLK_ROUNDUP(_text, __start_rodata),
 		  MLK_ROUNDUP(__start_rodata, _etext),
 		  MLK_ROUNDUP(__init_begin, __init_end),
-		  MLK_ROUNDUP(_sdata, _edata),
+		  MLK_ROUNDUP(_sdata, _edata));
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+	pr_cont("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
+		"              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
 		  MLG(VMEMMAP_START,
 		      VMEMMAP_START + VMEMMAP_SIZE),
 		  MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
-		      (unsigned long)virt_to_page(high_memory)),
+		      (unsigned long)virt_to_page(high_memory)));
 #endif
-		  MLK(FIXADDR_START, FIXADDR_TOP),
-		  MLM(PCI_IO_START, PCI_IO_END),
+	pr_cont("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
+		MLK(FIXADDR_START, FIXADDR_TOP));
+	pr_cont("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
+		MLM(PCI_IO_START, PCI_IO_END));
+	pr_cont("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 		  MLM(__phys_to_virt(memblock_start_of_DRAM()),
 		      (unsigned long)high_memory));
 
...
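Splitting the banner means no single printk() call has to carry the whole format string and argument list, and each conditional section now keeps its format and its arguments side by side instead of in two #ifdef blocks far apart. The pattern in miniature (hypothetical banner):

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <linux/sizes.h>

	/* One pr_notice() heading, then one pr_cont() (KERN_CONT) call per
	 * line, so no single printk() has to hold the entire text. */
	static void __init layout_banner_demo(unsigned long base, unsigned long top)
	{
		pr_notice("Widget memory layout:\n");
		pr_cont("    region : 0x%16lx - 0x%16lx\n", base, top);
		pr_cont("    size   : %6ld KB\n", (top - base) / SZ_1K);
	}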
@@ -211,8 +211,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
 		      block_mappings_allowed(pgtable_alloc)) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys |
-					   pgprot_val(mk_sect_prot(prot))));
+			pmd_set_huge(pmd, phys, prot);
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -272,8 +271,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 		if (use_1G_block(addr, next, phys) &&
 		    block_mappings_allowed(pgtable_alloc)) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys |
-					   pgprot_val(mk_sect_prot(prot))));
+			pud_set_huge(pud, phys, prot);
 			/*
 			 * If we have an old value for a pud, it will
...
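The two hunks above replace open-coded section-mapping stores with the generic huge-page helpers, so this path and the huge-vmap path share one definition. A rough sketch of what the helper does, equivalent to the lines it replaces (simplified; the in-tree version in arch/arm64/mm/mmu.c may add type bits and sanity checks):

	int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
	{
		/* install a block (section) mapping covering the whole PMD range */
		set_pmd(pmdp, __pmd(phys | pgprot_val(mk_sect_prot(prot))));
		return 1;	/* huge mapping installed */
	}

pud_set_huge() is the same shape one level up, used for the 1GB-block case.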