Commit 2875fe05 authored by Atish Patra, committed by Palmer Dabbelt

RISC-V: Add cpu_ops and modify default booting method

Currently, all non-booting harts start booting after the booting hart
updates the per-hart stack pointer. This is done in a way that makes it
difficult to implement any other booting method without breaking
backward compatibility.

Define a cpu_ops structure that allows other booting methods to be
introduced in the future. Modify the current booting method to be
compatible with cpu_ops.
Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent e011995e
arch/riscv/include/asm/cpu_ops.h (new file):

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 * Based on arch/arm64/include/asm/cpu_ops.h
 */
#ifndef __ASM_CPU_OPS_H
#define __ASM_CPU_OPS_H

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/threads.h>

/**
 * struct cpu_operations - Callback operations for hotplugging CPUs.
 *
 * @name:		Name of the boot protocol.
 * @cpu_prepare:	Early one-time preparation step for a cpu. If there
 *			is a mechanism for doing so, tests whether it is
 *			possible to boot the given HART.
 * @cpu_start:		Boots a cpu into the kernel.
 */
struct cpu_operations {
	const char	*name;
	int		(*cpu_prepare)(unsigned int cpu);
	int		(*cpu_start)(unsigned int cpu,
				     struct task_struct *tidle);
};

extern const struct cpu_operations *cpu_ops[NR_CPUS];
void __init cpu_set_ops(int cpu);
void cpu_update_secondary_bootdata(unsigned int cpuid,
				   struct task_struct *tidle);

#endif /* ifndef __ASM_CPU_OPS_H */
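
The interface is deliberately minimal: a new boot protocol only has to fill in
a named struct cpu_operations. As a sketch (not part of this commit; the
"example" names below are hypothetical), a firmware-assisted protocol would
plug in like this:

/* Hypothetical illustration only -- not in this commit. */
#include <linux/errno.h>
#include <asm/cpu_ops.h>

static int example_cpu_prepare(unsigned int cpuid)
{
	/* Ask firmware whether this hart can be booted at all. */
	return 0;
}

static int example_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	/* Request that firmware start the hart at the kernel entry point. */
	return -EOPNOTSUPP;
}

const struct cpu_operations cpu_ops_example = {
	.name		= "example",
	.cpu_prepare	= example_cpu_prepare,
	.cpu_start	= example_cpu_start,
};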
arch/riscv/kernel/Makefile:

@@ -34,6 +34,8 @@ obj-$(CONFIG_RISCV_M_MODE)	+= clint.o
 obj-$(CONFIG_FPU)		+= fpu.o
 obj-$(CONFIG_SMP)		+= smpboot.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SMP)		+= cpu_ops.o
+obj-$(CONFIG_SMP)		+= cpu_ops_spinwait.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o
arch/riscv/kernel/cpu_ops.c (new file):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>

const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;

void *__cpu_up_stack_pointer[NR_CPUS];
void *__cpu_up_task_pointer[NR_CPUS];

extern const struct cpu_operations cpu_ops_spinwait;

void cpu_update_secondary_bootdata(unsigned int cpuid,
				   struct task_struct *tidle)
{
	int hartid = cpuid_to_hartid_map(cpuid);

	/* Make sure tidle is updated */
	smp_mb();
	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
		   task_stack_page(tidle) + THREAD_SIZE);
	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
}

void __init cpu_set_ops(int cpuid)
{
	cpu_ops[cpuid] = &cpu_ops_spinwait;
}
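
cpu_set_ops() is hard-coded to spinwait for now, but it is the natural hook
for selecting a protocol per CPU once more than one exists. A sketch of that
future shape (firmware_boot_available() and cpu_ops_firmware are made-up
names, used purely for illustration):

void __init cpu_set_ops(int cpuid)
{
	/* Prefer a firmware-assisted protocol when one is available. */
	if (firmware_boot_available())
		cpu_ops[cpuid] = &cpu_ops_firmware;
	else
		cpu_ops[cpuid] = &cpu_ops_spinwait;
}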
arch/riscv/kernel/cpu_ops_spinwait.c (new file):

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>

const struct cpu_operations cpu_ops_spinwait;

static int spinwait_cpu_prepare(unsigned int cpuid)
{
	if (!cpu_ops_spinwait.cpu_start) {
		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
		return -ENODEV;
	}

	return 0;
}

static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	/*
	 * In this protocol, all cpus boot on their own accord. _start
	 * selects the first cpu to boot the kernel and causes the remainder
	 * of the cpus to spin in a loop waiting for their stack pointer to be
	 * setup by that main cpu. Writing to bootdata
	 * (i.e __cpu_up_stack_pointer) signals to the spinning cpus that they
	 * can continue the boot process.
	 */
	cpu_update_secondary_bootdata(cpuid, tidle);

	return 0;
}

const struct cpu_operations cpu_ops_spinwait = {
	.name		= "spinwait",
	.cpu_prepare	= spinwait_cpu_prepare,
	.cpu_start	= spinwait_cpu_start,
};
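
The other half of the spinwait handshake lives in the assembly entry code
(head.S). Conceptually, each waiting hart does the equivalent of the
following C sketch (illustration only; the real loop runs pre-MMU in
assembly, and secondary_spin_wait is a hypothetical name):

static void secondary_spin_wait(unsigned long hartid)
{
	void *sp, *tp;

	/* Spin until the boot hart publishes our stack and task pointers. */
	do {
		sp = READ_ONCE(__cpu_up_stack_pointer[hartid]);
		tp = READ_ONCE(__cpu_up_task_pointer[hartid]);
	} while (!sp || !tp);

	/* Install sp/tp, then enter the kernel's secondary startup path. */
}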
arch/riscv/kernel/smpboot.c:

@@ -25,6 +25,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
 #include <asm/clint.h>
+#include <asm/cpu_ops.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -34,8 +35,6 @@
 #include "head.h"

-void *__cpu_up_stack_pointer[NR_CPUS];
-void *__cpu_up_task_pointer[NR_CPUS];
 static DECLARE_COMPLETION(cpu_running);

 void __init smp_prepare_boot_cpu(void)
@@ -46,6 +45,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	int cpuid;
+	int ret;

 	/* This covers non-smp usecase mandated by "nosmp" option */
 	if (max_cpus == 0)
@@ -54,6 +54,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for_each_possible_cpu(cpuid) {
 		if (cpuid == smp_processor_id())
 			continue;
+		if (cpu_ops[cpuid]->cpu_prepare) {
+			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
+			if (ret)
+				continue;
+		}
 		set_cpu_present(cpuid, true);
 	}
 }
@@ -65,6 +70,8 @@ void __init setup_smp(void)
 	bool found_boot_cpu = false;
 	int cpuid = 1;

+	cpu_set_ops(0);
+
 	for_each_of_cpu_node(dn) {
 		hart = riscv_of_processor_hartid(dn);
 		if (hart < 0)
@@ -92,29 +99,28 @@ void __init setup_smp(void)
 			cpuid, nr_cpu_ids);

 	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
-		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
+			cpu_set_ops(cpuid);
 			set_cpu_possible(cpuid, true);
+		}
 	}
 }

+int start_secondary_cpu(int cpu, struct task_struct *tidle)
+{
+	if (cpu_ops[cpu]->cpu_start)
+		return cpu_ops[cpu]->cpu_start(cpu, tidle);
+
+	return -EOPNOTSUPP;
+}
+
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int ret = 0;
-	int hartid = cpuid_to_hartid_map(cpu);
 	tidle->thread_info.cpu = cpu;

-	/*
-	 * On RISC-V systems, all harts boot on their own accord. Our _start
-	 * selects the first hart to boot the kernel and causes the remainder
-	 * of the harts to spin in a loop waiting for their stack pointer to be
-	 * setup by that main hart. Writing __cpu_up_stack_pointer signals to
-	 * the spinning harts that they can continue the boot process.
-	 */
-	smp_mb();
-	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
-		   task_stack_page(tidle) + THREAD_SIZE);
-	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
-
-	lockdep_assert_held(&cpu_running);
-	wait_for_completion_timeout(&cpu_running,
-				    msecs_to_jiffies(1000));
+	ret = start_secondary_cpu(cpu, tidle);
+	if (!ret) {
+		lockdep_assert_held(&cpu_running);
+		wait_for_completion_timeout(&cpu_running,
+					    msecs_to_jiffies(1000));
@@ -123,6 +129,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-	if (!cpu_online(cpu)) {
-		pr_crit("CPU%u: failed to come online\n", cpu);
-		ret = -EIO;
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
+			ret = -EIO;
+		}
+	} else {
+		pr_crit("CPU%u: failed to start\n", cpu);
 	}

 	return ret;
 }
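
Putting the pieces together, the resulting boot flow for a secondary CPU,
as established by this commit, is:

setup_smp()
    cpu_set_ops(cpuid)                 /* pick the boot protocol */
smp_prepare_cpus()
    cpu_ops[cpuid]->cpu_prepare()      /* spinwait: check cpu_start exists */
__cpu_up()
    start_secondary_cpu()
        cpu_ops[cpu]->cpu_start()      /* spinwait: publish stack/task ptrs */
    wait_for_completion_timeout()      /* wait for the hart to come online */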