Commit 0861fd1c authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - fix EFI stub cache maintenance causing aborts during boot on certain
   platforms

 - handle byte stores in __clear_user without panicking

 - fix race condition in aarch64_insn_patch_text_sync() (instruction
   patching)

 - Couple of type fixes

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: ARCH_PFN_OFFSET should be unsigned long
  Correct the race condition in aarch64_insn_patch_text_sync()
  arm64: __clear_user: handle exceptions on strb
  arm64: Fix data type for physical address
  arm64: efi: Fix stub cache maintenance
parents 5ae93760 5fd6690c
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  * virt_to_page(k)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(k)	indicates whether a virtual address is valid
  */
-#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
+#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
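Pfns are bare unsigned long values throughout the kernel, while PHYS_PFN_OFFSET inherits phys_addr_t (u64) from PHYS_OFFSET. The two are distinct C types even though both are 64-bit on arm64, which trips type-sensitive constructs such as the kernel's min()/max() macros. A minimal user-space sketch of the mechanism (my illustration; the min() here is simplified from the kernel's, and the offset value is made up):

/* Build with: gcc -Wall demo.c -- the point is the warning, on any LP64 target. */
#include <stdio.h>

typedef unsigned long long u64;	/* phys_addr_t is u64 on arm64 */

/* Simplified from the kernel's type-checking min() macro. */
#define min(x, y) ({						\
	typeof(x) _x = (x);					\
	typeof(y) _y = (y);					\
	(void)(&_x == &_y); /* warns if x, y differ in type */	\
	_x < _y ? _x : _y; })

#define PHYS_PFN_OFFSET	((u64)0x40000000 >> 12)		 /* u64, like the old macro */
#define ARCH_PFN_OFFSET	((unsigned long)PHYS_PFN_OFFSET) /* the fix: cast to the pfn type */

int main(void)
{
	unsigned long pfn = 0x50000;

	/*
	 * min(pfn, PHYS_PFN_OFFSET) would warn: unsigned long and u64 are
	 * distinct types even though both are 64 bits wide on arm64. With
	 * the cast, both operands are unsigned long and the check passes.
	 */
	printf("%lu\n", min(pfn, ARCH_PFN_OFFSET));
	return 0;
}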
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
 	b.eq	efi_load_fail
 
 	/*
-	 * efi_entry() will have relocated the kernel image if necessary
-	 * and we return here with device tree address in x0 and the kernel
-	 * entry point stored at *image_addr. Save those values in registers
-	 * which are callee preserved.
+	 * efi_entry() will have copied the kernel image if necessary and we
+	 * return here with device tree address in x0 and the kernel entry
+	 * point stored at *image_addr. Save those values in registers which
+	 * are callee preserved.
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
 	mov	x21, x0
 
 	/*
-	 * Flush dcache covering current runtime addresses
-	 * of kernel text/data. Then flush all of icache.
+	 * Calculate size of the kernel Image (same for original and copy).
 	 */
 	adrp	x1, _text
 	add	x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
 	add	x2, x2, #:lo12:_edata
 	sub	x1, x2, x1
 
+	/*
+	 * Flush the copied Image to the PoC, and ensure it is not shadowed by
+	 * stale icache entries from before relocation.
+	 */
 	bl	__flush_dcache_area
 	ic	ialluis
 
+	/*
+	 * Ensure that the rest of this function (in the original Image) is
+	 * visible when the caches are disabled. The I-cache can't have stale
+	 * entries for the VA range of the current image, so no maintenance is
+	 * necessary.
+	 */
+	adr	x0, efi_stub_entry
+	adr	x1, efi_stub_entry_end
+	sub	x1, x1, x0
+	bl	__flush_dcache_area
+
 	/* Turn off Dcache and MMU */
 	mrs	x0, CurrentEL
 	cmp	x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
 	ldp	x29, x30, [sp], #32
 	ret
 
+efi_stub_entry_end:
 ENDPROC(efi_stub_entry)
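The added maintenance matters because the stub is about to run with the MMU and caches off: data written through the D-cache must reach the point of coherency, and the I-cache must not hold stale lines for the copied Image. A rough user-space analogue of the same rule (my sketch, not part of the commit; needs an AArch64 Linux system that permits writable+executable mappings):

/* Build with: gcc demo.c (AArch64 Linux). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* AArch64 encodings for: mov w0, #42; ret */
	uint32_t code[] = { 0x52800540, 0xd65f03c0 };

	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memcpy(buf, code, sizeof(code));

	/*
	 * Clean the D-cache and invalidate the I-cache for the copied
	 * instructions; without this the core may fetch stale lines, the
	 * same class of problem the stub fix addresses. The builtin emits
	 * the equivalent dc cvau / ic ivau sequence on AArch64.
	 */
	__builtin___clear_cache((char *)buf, (char *)buf + sizeof(code));

	int (*fn)(void) = (int (*)(void))buf;
	printf("%d\n", fn());	/* prints 42 */
	return 0;
}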
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
 		 * which ends with "dsb; isb" pair guaranteeing global
 		 * visibility.
 		 */
-		atomic_set(&pp->cpu_count, -1);
+		/* Notify other processors with an additional increment. */
+		atomic_inc(&pp->cpu_count);
 	} else {
-		while (atomic_read(&pp->cpu_count) != -1)
+		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
 			cpu_relax();
 		isb();
 	}
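A user-space model of the fixed rendezvous (my sketch, not kernel code): the old code set cpu_count to -1 once patching finished, which raced with CPUs that had not yet performed their own increment; the fix signals completion with one extra increment, so waiters spin until the count exceeds the number of online CPUs. Build with gcc -pthread; NCPUS stands in for num_online_cpus():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpu_count;

static void *cpu_fn(void *arg)
{
	long id = (long)arg;

	/* Every "CPU" checks in first; the first arrival does the patching. */
	if (atomic_fetch_add(&cpu_count, 1) == 0) {
		puts("patching text...");
		/*
		 * Extra increment signals completion: the count eventually
		 * reaches NCPUS + 1. The old code stored -1 here instead,
		 * racing with late arrivals still incrementing the counter.
		 */
		atomic_fetch_add(&cpu_count, 1);
	} else {
		/* Released only after all CPUs arrived AND patching is done. */
		while (atomic_load(&cpu_count) <= NCPUS)
			;	/* cpu_relax() in the kernel */
	}
	printf("cpu %ld sees patched text\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_fn, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}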
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh	wzr, [x0], #2	)
 	sub	x1, x1, #2
 4:	adds	x1, x1, #1
 	b.mi	5f
-	strb	wzr, [x0]
+USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
 	ret
 ENDPROC(__clear_user)
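The final byte store in __clear_user was a plain strb, so a fault on a bad user address panicked the kernel instead of taking the exception-table fixup path that the USER() macro provides for the other stores. A user-space analogue of that fixup (my sketch: a SIGSEGV handler plus siglongjmp plays the role of the 9f fixup entry):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static sigjmp_buf fixup;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(fixup, 1);	/* "branch to the 9f fixup label" */
}

/*
 * Zero n bytes at dst, returning the number of bytes NOT zeroed, mirroring
 * __clear_user's contract of reporting the unfinished remainder on a fault.
 */
static unsigned long clear_bytes(volatile char *dst, unsigned long n)
{
	volatile unsigned long i = 0;	/* volatile: survives the longjmp */

	if (sigsetjmp(fixup, 1))
		return n - i;	/* fault taken: report the remainder */

	for (; i < n; i++)
		dst[i] = 0;	/* each store may fault, like strb wzr */
	return 0;
}

int main(void)
{
	struct sigaction sa;
	char buf[16];

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = segv_handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	printf("good buffer: %lu left\n", clear_bytes(buf, sizeof(buf)));
	printf("bad pointer: %lu left\n", clear_bytes((char *)0x1, 8));
	return 0;
}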
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, unsigned long phys,
+				  unsigned long end, phys_addr_t phys,
 				  int map_io)
 {
 	pud_t *pud;
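phys_addr_t is the kernel's type for physical addresses. On arm64 it is the same width as unsigned long, so this change is largely type hygiene, but on 32-bit kernels with LPAE the two differ and passing a physical address as unsigned long silently truncates it. A small sketch of that failure mode (my illustration; the 32-bit unsigned long is simulated with uint32_t):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;	/* always wide enough for a physical address */
typedef uint32_t ulong32;	/* stand-in for a 32-bit unsigned long */

static void map_region_buggy(ulong32 phys)	/* wrong parameter type */
{
	printf("buggy: mapping phys 0x%08x\n", phys);
}

static void map_region_fixed(phys_addr_t phys)
{
	printf("fixed: mapping phys 0x%016llx\n", (unsigned long long)phys);
}

int main(void)
{
	phys_addr_t high_ram = 0x100000000ULL;	/* RAM above 4 GiB */

	map_region_buggy((ulong32)high_ram);	/* truncates to 0x00000000 */
	map_region_fixed(high_ram);		/* preserves the full address */
	return 0;
}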