Commit 0e15c3c7 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'riscv-for-linus-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - build fix for the NR_CPUS Kconfig SBI version dependency

 - fixes to early memory initialization, to fix page permissions in EFI
   and post-initmem-free

 - build fix for the VDSO, to avoid trying to profile the VDSO functions

 - fixes for kexec crash handling, to fix multi-core and interrupt
   related initialization inside the crash kernel

 - fix for a race condition when handling multiple concurrent kernel
   stack overflows

* tag 'riscv-for-linus-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: kexec: Fixup crash_smp_send_stop without multi cores
  riscv: kexec: Fixup irq controller broken in kexec crash path
  riscv: mm: Proper page permissions after initmem free
  riscv: vdso: fix section overlapping under some conditions
  riscv: fix race when vmap stack overflow
  riscv: Sync efi page table's kernel mappings before switching
  riscv: Fix NR_CPUS range conditions
parents 2df2adc3 39cefc5f
...@@ -317,9 +317,9 @@ config SMP ...@@ -317,9 +317,9 @@ config SMP
config NR_CPUS config NR_CPUS
int "Maximum number of CPUs (2-512)" int "Maximum number of CPUs (2-512)"
depends on SMP depends on SMP
range 2 512 if !SBI_V01 range 2 512 if !RISCV_SBI_V01
range 2 32 if SBI_V01 && 32BIT range 2 32 if RISCV_SBI_V01 && 32BIT
range 2 64 if SBI_V01 && 64BIT range 2 64 if RISCV_SBI_V01 && 64BIT
default "32" if 32BIT default "32" if 32BIT
default "64" if 64BIT default "64" if 64BIT
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#define REG_L __REG_SEL(ld, lw) #define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw) #define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w) #define REG_SC __REG_SEL(sc.d, sc.w)
#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
#define REG_ASM __REG_SEL(.dword, .word) #define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4) #define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2) #define LGREG __REG_SEL(3, 2)
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_EFI #ifdef CONFIG_EFI
extern void efi_init(void); extern void efi_init(void);
...@@ -20,7 +21,10 @@ extern void efi_init(void); ...@@ -20,7 +21,10 @@ extern void efi_init(void);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_setup() efi_virtmap_load() #define arch_efi_call_virt_setup() ({ \
sync_kernel_mappings(efi_mm.pgd); \
efi_virtmap_load(); \
})
#define arch_efi_call_virt_teardown() efi_virtmap_unload() #define arch_efi_call_virt_teardown() efi_virtmap_unload()
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE) #define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
......
...@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) ...@@ -127,6 +127,13 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) #define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */ #endif /* __PAGETABLE_PMD_FOLDED */
/*
 * Copy the kernel-half entries (index USER_PTRS_PER_PGD and above) of the
 * reference init_mm page table into @pgd, so that a page table which was
 * populated earlier (e.g. the EFI runtime pgd) picks up kernel mappings
 * created since. Caller is responsible for any required TLB maintenance.
 */
static inline void sync_kernel_mappings(pgd_t *pgd)
{
memcpy(pgd + USER_PTRS_PER_PGD,
init_mm.pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
pgd_t *pgd; pgd_t *pgd;
...@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) ...@@ -135,9 +142,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) { if (likely(pgd != NULL)) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
/* Copy kernel mappings */ /* Copy kernel mappings */
memcpy(pgd + USER_PTRS_PER_PGD, sync_kernel_mappings(pgd);
init_mm.pgd + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
} }
return pgd; return pgd;
} }
......
...@@ -50,6 +50,9 @@ void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); ...@@ -50,6 +50,9 @@ void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
/* Clear IPI for current CPU */ /* Clear IPI for current CPU */
void riscv_clear_ipi(void); void riscv_clear_ipi(void);
/* Check other CPUs stop or not */
bool smp_crash_stop_failed(void);
/* Secondary hart entry */ /* Secondary hart entry */
asmlinkage void smp_callin(void); asmlinkage void smp_callin(void);
......
...@@ -404,6 +404,19 @@ handle_syscall_trace_exit: ...@@ -404,6 +404,19 @@ handle_syscall_trace_exit:
#ifdef CONFIG_VMAP_STACK #ifdef CONFIG_VMAP_STACK
handle_kernel_stack_overflow: handle_kernel_stack_overflow:
/*
* Takes the pseudo-spinlock for the shadow stack, in case multiple
* harts are concurrently overflowing their kernel stacks. We could
* store any value here, but since we're overflowing the kernel stack
* already we only have SP to use as a scratch register. So we just
* swap in the address of the spinlock, as that's definitely non-zero.
*
* Pairs with a store_release in handle_bad_stack().
*/
1: la sp, spin_shadow_stack
REG_AMOSWAP_AQ sp, sp, (sp)
bnez sp, 1b
la sp, shadow_stack la sp, shadow_stack
addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#include <linux/compiler.h> /* For unreachable() */ #include <linux/compiler.h> /* For unreachable() */
#include <linux/cpu.h> /* For cpu_down() */ #include <linux/cpu.h> /* For cpu_down() */
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
/* /*
* kexec_image_info - Print received image details * kexec_image_info - Print received image details
...@@ -138,20 +140,35 @@ void machine_shutdown(void) ...@@ -138,20 +140,35 @@ void machine_shutdown(void)
#endif #endif
} }
/* Override the weak function in kernel/panic.c */ static void machine_kexec_mask_interrupts(void)
void crash_smp_send_stop(void)
{ {
static int cpus_stopped; unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_chip *chip;
int ret;
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
/* /*
* This function can be called twice in panic path, but obviously * First try to remove the active state. If this
* we execute this only once. * fails, try to EOI the interrupt.
*/ */
if (cpus_stopped) ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
return;
if (ret && irqd_irq_inprogress(&desc->irq_data) &&
chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
if (chip->irq_mask)
chip->irq_mask(&desc->irq_data);
smp_send_stop(); if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
cpus_stopped = 1; chip->irq_disable(&desc->irq_data);
}
} }
/* /*
...@@ -169,6 +186,8 @@ machine_crash_shutdown(struct pt_regs *regs) ...@@ -169,6 +186,8 @@ machine_crash_shutdown(struct pt_regs *regs)
crash_smp_send_stop(); crash_smp_send_stop();
crash_save_cpu(regs, smp_processor_id()); crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
pr_info("Starting crashdump kernel...\n"); pr_info("Starting crashdump kernel...\n");
} }
...@@ -195,6 +214,11 @@ machine_kexec(struct kimage *image) ...@@ -195,6 +214,11 @@ machine_kexec(struct kimage *image)
void *control_code_buffer = page_address(image->control_code_page); void *control_code_buffer = page_address(image->control_code_page);
riscv_kexec_method kexec_method = NULL; riscv_kexec_method kexec_method = NULL;
#ifdef CONFIG_SMP
WARN(smp_crash_stop_failed(),
"Some CPUs may be stale, kdump will be unreliable.\n");
#endif
if (image->type != KEXEC_TYPE_CRASH) if (image->type != KEXEC_TYPE_CRASH)
kexec_method = control_code_buffer; kexec_method = control_code_buffer;
else else
......
...@@ -322,10 +322,11 @@ subsys_initcall(topology_init); ...@@ -322,10 +322,11 @@ subsys_initcall(topology_init);
void free_initmem(void) void free_initmem(void)
{ {
if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx);
IS_ENABLED(CONFIG_64BIT) ? if (IS_ENABLED(CONFIG_64BIT))
set_memory_rw : set_memory_rw_nx); set_kernel_memory(__init_begin, __init_end, set_memory_nx);
}
free_initmem_default(POISON_FREE_INITMEM); free_initmem_default(POISON_FREE_INITMEM);
} }
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/clockchips.h> #include <linux/clockchips.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kexec.h>
#include <linux/profile.h> #include <linux/profile.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -22,11 +23,13 @@ ...@@ -22,11 +23,13 @@
#include <asm/sbi.h> #include <asm/sbi.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
enum ipi_message_type { enum ipi_message_type {
IPI_RESCHEDULE, IPI_RESCHEDULE,
IPI_CALL_FUNC, IPI_CALL_FUNC,
IPI_CPU_STOP, IPI_CPU_STOP,
IPI_CPU_CRASH_STOP,
IPI_IRQ_WORK, IPI_IRQ_WORK,
IPI_TIMER, IPI_TIMER,
IPI_MAX IPI_MAX
...@@ -71,6 +74,32 @@ static void ipi_stop(void) ...@@ -71,6 +74,32 @@ static void ipi_stop(void)
wait_for_interrupt(); wait_for_interrupt();
} }
#ifdef CONFIG_KEXEC_CORE
/* Number of secondary CPUs that have not yet acknowledged the crash-stop IPI. */
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
/*
 * Handler for IPI_CPU_CRASH_STOP: save this CPU's register state for the
 * crash dump, acknowledge the stop request, then park the hart — either by
 * handing it back to the platform (SBI HSM stop, when hotplug is available)
 * or by spinning in wait_for_interrupt() with interrupts disabled.
 *
 * @cpu:  logical id of the CPU being stopped
 * @regs: register state at the point the IPI was taken, saved into the dump
 */
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
crash_save_cpu(regs, cpu);
/* Tell crash_smp_send_stop() this hart has checked in. */
atomic_dec(&waiting_for_crash_ipi);
local_irq_disable();
#ifdef CONFIG_HOTPLUG_CPU
/* Prefer a real offline via the cpu_ops stop hook when supported. */
if (cpu_has_hotplug(cpu))
cpu_ops[cpu]->cpu_stop();
#endif
/* No way off this hart: idle forever with interrupts masked. */
for(;;)
wait_for_interrupt();
}
#else
/* !CONFIG_KEXEC_CORE: IPI_CPU_CRASH_STOP is never sent, so this can't run. */
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
unreachable();
}
#endif
static const struct riscv_ipi_ops *ipi_ops __ro_after_init; static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
...@@ -124,8 +153,9 @@ void arch_irq_work_raise(void) ...@@ -124,8 +153,9 @@ void arch_irq_work_raise(void)
void handle_IPI(struct pt_regs *regs) void handle_IPI(struct pt_regs *regs)
{ {
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; unsigned int cpu = smp_processor_id();
unsigned long *stats = ipi_data[smp_processor_id()].stats; unsigned long *pending_ipis = &ipi_data[cpu].bits;
unsigned long *stats = ipi_data[cpu].stats;
riscv_clear_ipi(); riscv_clear_ipi();
...@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs) ...@@ -154,6 +184,10 @@ void handle_IPI(struct pt_regs *regs)
ipi_stop(); ipi_stop();
} }
if (ops & (1 << IPI_CPU_CRASH_STOP)) {
ipi_cpu_crash_stop(cpu, get_irq_regs());
}
if (ops & (1 << IPI_IRQ_WORK)) { if (ops & (1 << IPI_IRQ_WORK)) {
stats[IPI_IRQ_WORK]++; stats[IPI_IRQ_WORK]++;
irq_work_run(); irq_work_run();
...@@ -176,6 +210,7 @@ static const char * const ipi_names[] = { ...@@ -176,6 +210,7 @@ static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts", [IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts", [IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_TIMER] = "Timer broadcast interrupts", [IPI_TIMER] = "Timer broadcast interrupts",
}; };
...@@ -235,6 +270,64 @@ void smp_send_stop(void) ...@@ -235,6 +270,64 @@ void smp_send_stop(void)
cpumask_pr_args(cpu_online_mask)); cpumask_pr_args(cpu_online_mask));
} }
#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
unsigned int this_cpu_online = cpu_online(smp_processor_id());
return num_online_cpus() - this_cpu_online;
}
/*
 * Stop all other CPUs ahead of a crash-kernel kexec: send each of them an
 * IPI_CPU_CRASH_STOP (so they save their registers via crash_save_cpu())
 * and busy-wait up to roughly one second for all of them to acknowledge.
 * Overrides the weak crash_smp_send_stop() in kernel/panic.c.
 */
void crash_smp_send_stop(void)
{
static int cpus_stopped;
cpumask_t mask;
unsigned long timeout;
/*
 * This function can be called twice in panic path, but obviously
 * we execute this only once.
 */
if (cpus_stopped)
return;
cpus_stopped = 1;
/*
 * If this cpu is the only one alive at this point in time, online or
 * not, there are no stop messages to be sent around, so just back out.
 */
if (num_other_online_cpus() == 0)
return;
/* Target every online CPU except ourselves. */
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
/* Each target decrements this from ipi_cpu_crash_stop(). */
atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
pr_crit("SMP: stopping secondary CPUs\n");
send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
udelay(1);
if (atomic_read(&waiting_for_crash_ipi) > 0)
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
}
/*
 * True if at least one secondary CPU never acknowledged the crash-stop IPI
 * above; machine_kexec() uses this to warn that the kdump may be unreliable.
 */
bool smp_crash_stop_failed(void)
{
return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif
void smp_send_reschedule(int cpu) void smp_send_reschedule(int cpu)
{ {
send_ipi_single(cpu, IPI_RESCHEDULE); send_ipi_single(cpu, IPI_RESCHEDULE);
......
...@@ -221,11 +221,29 @@ asmlinkage unsigned long get_overflow_stack(void) ...@@ -221,11 +221,29 @@ asmlinkage unsigned long get_overflow_stack(void)
OVERFLOW_STACK_SIZE; OVERFLOW_STACK_SIZE;
} }
/*
* A pseudo spinlock to protect the shadow stack from being used by multiple
* harts concurrently. This isn't a real spinlock because the lock side must
* be taken without a valid stack and only a single register, it's only taken
* while in the process of panicking anyway so the performance and error
* checking a proper spinlock gives us doesn't matter.
*/
unsigned long spin_shadow_stack;
asmlinkage void handle_bad_stack(struct pt_regs *regs) asmlinkage void handle_bad_stack(struct pt_regs *regs)
{ {
unsigned long tsk_stk = (unsigned long)current->stack; unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
/*
* We're done with the shadow stack by this point, as we're on the
* overflow stack. Tell any other concurrent overflowing harts that
* they can proceed with panicking by releasing the pseudo-spinlock.
*
* This pairs with an amoswap.aq in handle_kernel_stack_overflow.
*/
smp_store_release(&spin_shadow_stack, 0);
console_verbose(); console_verbose();
pr_emerg("Insufficient stack space to handle exception!\n"); pr_emerg("Insufficient stack space to handle exception!\n");
......
...@@ -17,6 +17,7 @@ vdso-syms += flush_icache ...@@ -17,6 +17,7 @@ vdso-syms += flush_icache
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
ccflags-y := -fno-stack-protector ccflags-y := -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
ifneq ($(c-gettimeofday-y),) ifneq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y) CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment