Commit 98884281 authored by Linus Torvalds

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Catalin Marinas:

 - ZONE_DMA32 initialisation fix when memblocks fall entirely within the
   first GB (used by ZONE_DMA in 5.5 for Raspberry Pi 4).

 - Couple of ftrace fixes following the FTRACE_WITH_REGS patchset.

 - access_ok() fix for the Tagged Address ABI when called from a
   kernel thread (asynchronous I/O): the kthread does not have the TIF
   flags of the mm owner, so untag the user address unconditionally.

 - KVM compute_layout() called before the alternatives code patching.

 - Minor clean-ups.

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: entry: refine comment of stack overflow check
  arm64: ftrace: fix ifdeffery
  arm64: KVM: Invoke compute_layout() before alternatives are applied
  arm64: Validate tagged addresses in access_ok() called from kernel threads
  arm64: mm: Fix column alignment for UXN in kernel_page_tables
  arm64: insn: consistently handle exit text
  arm64: mm: Fix initialisation of DMA zones on non-NUMA systems
parents 76f6777c de858040
@@ -91,6 +91,7 @@ alternative_cb_end
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
+void kvm_compute_layout(void);
 
 static inline unsigned long __kern_hyp_va(unsigned long v)
 {
...
@@ -15,6 +15,7 @@ extern char __hyp_text_start[], __hyp_text_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
+extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
...
@@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 {
 	unsigned long ret, limit = current_thread_info()->addr_limit;
 
+	/*
+	 * Asynchronous I/O running in a kernel thread does not have the
+	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
+	 * the user address before checking.
+	 */
 	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
-	    test_thread_flag(TIF_TAGGED_ADDR))
+	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
 		addr = untagged_addr(addr);
 
 	__chk_user_ptr(addr);
...
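For reference, untagged_addr() on arm64 strips the top-byte tag by sign-extending the address from bit 55, which leaves kernel addresses (bit 55 set) unchanged. A minimal userspace sketch of that arithmetic; the tagged address below is a made-up example, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's sign_extend64(value, 55). */
static uint64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (uint64_t)(((int64_t)(value << shift)) >> shift);
}

#define untagged_addr(addr)	sign_extend64((addr), 55)

int main(void)
{
	uint64_t tagged = 0x3a00ffffa0001000ULL;	/* tag 0x3a in bits 63:56 */

	/* Prints 0x3a00ffffa0001000 -> 0xffffa0001000 */
	printf("%#llx -> %#llx\n",
	       (unsigned long long)tagged,
	       (unsigned long long)untagged_addr(tagged));
	return 0;
}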
@@ -133,7 +133,6 @@ ENTRY(ftrace_graph_caller)
 	bl	prepare_ftrace_return
 	b	ftrace_common_return
 ENDPROC(ftrace_graph_caller)
-#else
 #endif
 
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -287,6 +286,7 @@ GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
 
 	mcount_exit
 ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -307,7 +307,6 @@ ENTRY(ftrace_graph_caller)
 	mcount_exit
 ENDPROC(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
 ENTRY(ftrace_stub)
...
@@ -76,7 +76,8 @@ alternative_else_nop_endif
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
+	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
+	 * should always be zero.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
...
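The check works because, with CONFIG_VMAP_STACK, arm64 allocates THREAD_SIZE stacks with THREAD_ALIGN = 2 * THREAD_SIZE, so every in-range SP has bit THREAD_SHIFT clear while an SP that has escaped into the guard region below has it set. A small userspace sketch, assuming 16 KiB stacks (THREAD_SHIFT = 14) and a made-up stack base:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SHIFT	14
#define THREAD_SIZE	(1UL << THREAD_SHIFT)
#define THREAD_ALIGN	(2 * THREAD_SIZE)

int main(void)
{
	/* Hypothetical stack base, aligned to 2 * THREAD_SIZE. */
	uint64_t base = 0xffff000012340000ULL & ~(uint64_t)(THREAD_ALIGN - 1);
	uint64_t good_sp = base + THREAD_SIZE - 256;	/* inside the stack */
	uint64_t bad_sp = base - 256;			/* overflowed below it */

	printf("good: bit14=%d\n", !!(good_sp & (1UL << THREAD_SHIFT)));	/* 0 */
	printf("bad:  bit14=%d\n", !!(bad_sp & (1UL << THREAD_SHIFT)));		/* 1 */
	return 0;
}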
@@ -21,6 +21,7 @@
 #include <asm/fixmap.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
+#include <asm/sections.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
 #define AARCH64_INSN_N_BIT	BIT(22)
@@ -78,16 +79,29 @@ bool aarch64_insn_is_branch_imm(u32 insn)
 
 static DEFINE_RAW_SPINLOCK(patch_lock);
 
+static bool is_exit_text(unsigned long addr)
+{
+	/* discarded with init text/data */
+	return system_state < SYSTEM_RUNNING &&
+		addr >= (unsigned long)__exittext_begin &&
+		addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+	return core_kernel_text(addr) || is_exit_text(addr);
+}
+
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
 	unsigned long uintaddr = (uintptr_t) addr;
-	bool module = !core_kernel_text(uintaddr);
+	bool image = is_image_text(uintaddr);
 	struct page *page;
 
-	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		page = vmalloc_to_page(addr);
-	else if (!module)
+	if (image)
 		page = phys_to_page(__pa_symbol(addr));
+	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+		page = vmalloc_to_page(addr);
 	else
 		return addr;
...
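After this change, patch_map() picks the page to alias through the fixmap in three tiers: image text (core kernel text, plus exit text until it is discarded), module text under strict RWX, and everything else patched in place. A hedged userspace model of that decision; the enum and helper below are hypothetical illustrations, not kernel API:

#include <stdbool.h>
#include <stdio.h>

enum map_src { MAP_LINEAR, MAP_VMALLOC, MAP_DIRECT };

/* image_text: core kernel text, or exit text before it is discarded. */
static enum map_src patch_map_choice(bool image_text, bool strict_module_rwx)
{
	if (image_text)			/* page = phys_to_page(__pa_symbol(addr)) */
		return MAP_LINEAR;
	if (strict_module_rwx)		/* page = vmalloc_to_page(addr) */
		return MAP_VMALLOC;
	return MAP_DIRECT;		/* module text is writable; patch in place */
}

int main(void)
{
	printf("image=%d module+strict=%d module=%d\n",
	       patch_map_choice(true, true),	/* MAP_LINEAR */
	       patch_map_choice(false, true),	/* MAP_VMALLOC */
	       patch_map_choice(false, false));	/* MAP_DIRECT */
	return 0;
}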
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/irq_work.h>
 #include <linux/kexec.h>
+#include <linux/kvm_host.h>
 
 #include <asm/alternative.h>
 #include <asm/atomic.h>
@@ -39,6 +40,7 @@
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/daifflags.h>
+#include <asm/kvm_mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
@@ -407,6 +409,8 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
+	if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+		kvm_compute_layout();
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
...
@@ -158,9 +158,12 @@ SECTIONS
 	__inittext_begin = .;
 
 	INIT_TEXT_SECTION(8)
+
+	__exittext_begin = .;
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
+	__exittext_end = .;
 
 	. = ALIGN(4);
 	.altinstructions : {
...
@@ -22,7 +22,7 @@ static u8 tag_lsb;
 static u64 tag_val;
 static u64 va_mask;
 
-static void compute_layout(void)
+__init void kvm_compute_layout(void)
 {
 	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 	u64 hyp_va_msb;
@@ -110,9 +110,6 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
 
 	BUG_ON(nr_inst != 5);
 
-	if (!has_vhe() && !va_mask)
-		compute_layout();
-
 	for (i = 0; i < nr_inst; i++) {
 		u32 rd, rn, insn, oinsn;
@@ -156,9 +153,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
 		return;
 	}
 
-	if (!va_mask)
-		compute_layout();
-
 	/*
 	 * Compute HYP VA by using the same computation as kern_hyp_va()
 	 */
...
@@ -142,6 +142,7 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_UXN,
 		.val	= PTE_UXN,
 		.set	= "UXN",
+		.clear	= "   ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
...
@@ -214,15 +214,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma32 = min;
-	unsigned long __maybe_unused max_dma = min;
+	unsigned long __maybe_unused max_dma, max_dma32;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
+	max_dma = max_dma32 = min;
 #ifdef CONFIG_ZONE_DMA
-	max_dma = PFN_DOWN(arm64_dma_phys_limit);
+	max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
 	zone_size[ZONE_DMA] = max_dma - min;
-	max_dma32 = max_dma;
 #endif
 #ifdef CONFIG_ZONE_DMA32
 	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
@@ -236,25 +235,23 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		unsigned long start = memblock_region_memory_base_pfn(reg);
 		unsigned long end = memblock_region_memory_end_pfn(reg);
 
+		if (start >= max)
+			continue;
 #ifdef CONFIG_ZONE_DMA
-		if (start < max_dma) {
-			unsigned long dma_end = min_not_zero(end, max_dma);
+		if (start >= min && start < max_dma) {
+			unsigned long dma_end = min(end, max_dma);
 			zhole_size[ZONE_DMA] -= dma_end - start;
+			start = dma_end;
 		}
 #endif
 #ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma32) {
+		if (start >= max_dma && start < max_dma32) {
 			unsigned long dma32_end = min(end, max_dma32);
-			unsigned long dma32_start = max(start, max_dma);
-			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
+			zhole_size[ZONE_DMA32] -= dma32_end - start;
+			start = dma32_end;
 		}
 #endif
-		if (end > max_dma32) {
+		if (start >= max_dma32 && start < max) {
 			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma32);
-			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+			zhole_size[ZONE_NORMAL] -= normal_end - start;
 		}
 	}
...
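To see the bug this fixes: in the old loop, a memblock lying entirely below max_dma still entered the ZONE_DMA32 branch, where max(start, max_dma) exceeded min(end, max_dma32) and the unsigned subtraction wrapped, corrupting zhole_size[ZONE_DMA32]. A hedged userspace simulation of both versions, with made-up PFN values:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
	/* Hypothetical layout: ZONE_DMA covers PFNs [0x40000, 0x80000),
	 * ZONE_DMA32 ends at 0x100000, and one memblock sits entirely
	 * inside ZONE_DMA. */
	unsigned long max_dma = 0x80000, max_dma32 = 0x100000;
	unsigned long start = 0x40000, end = 0x60000;
	unsigned long zhole_dma32 = max_dma32 - max_dma;

	/* Old code: taken because start < max_dma32, but end < max_dma,
	 * so dma32_end - dma32_start wraps around. */
	if (start < max_dma32) {
		unsigned long dma32_end = min_ul(end, max_dma32);
		unsigned long dma32_start = max_ul(start, max_dma);
		zhole_dma32 -= dma32_end - dma32_start;
	}
	printf("old: zhole_size[ZONE_DMA32] = %#lx (wrapped)\n", zhole_dma32);

	/* New code: 'start' is advanced zone by zone, so the DMA32 branch
	 * only runs for the part of the block at or above max_dma. */
	zhole_dma32 = max_dma32 - max_dma;
	start = min_ul(end, max_dma);	/* what the ZONE_DMA branch leaves */
	if (start >= max_dma && start < max_dma32)
		zhole_dma32 -= min_ul(end, max_dma32) - start;
	printf("new: zhole_size[ZONE_DMA32] = %#lx\n", zhole_dma32);
	return 0;
}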