Commit cc7f3f72 authored by Anup Patel, committed by Palmer Dabbelt

RISC-V: Add mechanism to provide custom IPI operations

We add a mechanism to set custom IPI operations so that the CLINT driver
under the drivers directory can provide its own IPI operations.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Tested-by: Emil Renner Berthing <kernel@esmil.dk>
Reviewed-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 9123e3a7
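For reference, a driver consuming the new hooks registers a struct riscv_ipi_ops and lets the arch code route IPIs through it. The sketch below is illustrative only and is not part of this commit: the foo_* names and the per-hart MMIO layout are hypothetical, but the registration pattern mirrors the clint.c and sbi.c changes in the diff.

/* Hypothetical driver sketch showing how the new API is consumed. */
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* Assumed per-hart IPI registers, mapped elsewhere by the driver. */
static u32 __iomem *foo_ipi_base;

static void foo_send_ipi(const struct cpumask *target)
{
        unsigned int cpu;

        /* One 32-bit IPI register per hart, indexed by hartid. */
        for_each_cpu(cpu, target)
                writel(1, foo_ipi_base + cpuid_to_hartid_map(cpu));
}

static void foo_clear_ipi(void)
{
        writel(0, foo_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
}

static struct riscv_ipi_ops foo_ipi_ops = {
        .ipi_inject     = foo_send_ipi,
        .ipi_clear      = foo_clear_ipi,
};

static int __init foo_ipi_init(void)
{
        /* ... map foo_ipi_base from the device tree here ... */

        /* Register with the arch code; IPIs are then routed through these ops. */
        riscv_set_ipi_ops(&foo_ipi_ops);
        return 0;
}

Interrupt handling (handle_IPI) and the per-cpu ipi_data bookkeeping remain in smp.c; only IPI injection and clearing are delegated to the registered ops, and riscv_clear_ipi() additionally clears the software-interrupt pending bit in CSR_IP.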
--- a/arch/riscv/include/asm/clint.h
+++ b/arch/riscv/include/asm/clint.h
@@ -6,34 +6,9 @@
 #include <linux/smp.h>
 
 #ifdef CONFIG_RISCV_M_MODE
-extern u32 __iomem *clint_ipi_base;
-
 void clint_init_boot_cpu(void);
-
-static inline void clint_send_ipi_single(unsigned long hartid)
-{
-        writel(1, clint_ipi_base + hartid);
-}
-
-static inline void clint_send_ipi_mask(const struct cpumask *mask)
-{
-        int cpu;
-
-        for_each_cpu(cpu, mask)
-                clint_send_ipi_single(cpuid_to_hartid_map(cpu));
-}
-
-static inline void clint_clear_ipi(unsigned long hartid)
-{
-        writel(0, clint_ipi_base + hartid);
-}
 #else /* CONFIG_RISCV_M_MODE */
 #define clint_init_boot_cpu()   do { } while (0)
-
-/* stubs to for code is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
-void clint_send_ipi_single(unsigned long hartid);
-void clint_send_ipi_mask(const struct cpumask *hartid_mask);
-void clint_clear_ipi(unsigned long hartid);
 #endif /* CONFIG_RISCV_M_MODE */
 
 #endif /* _ASM_RISCV_CLINT_H */
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -15,6 +15,11 @@
 struct seq_file;
 extern unsigned long boot_cpu_hartid;
 
+struct riscv_ipi_ops {
+        void (*ipi_inject)(const struct cpumask *target);
+        void (*ipi_clear)(void);
+};
+
 #ifdef CONFIG_SMP
 /*
  * Mapping between linux logical cpu index and hartid.
@@ -40,6 +45,12 @@ void arch_send_call_function_single_ipi(int cpu);
 int riscv_hartid_to_cpuid(int hartid);
 void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
 
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
 /* Secondary hart entry */
 asmlinkage void smp_callin(void);
@@ -81,6 +92,14 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
         cpumask_set_cpu(boot_cpu_hartid, out);
 }
 
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
--- a/arch/riscv/kernel/clint.c
+++ b/arch/riscv/kernel/clint.c
@@ -5,11 +5,11 @@
 #include <linux/io.h>
 #include <linux/of_address.h>
-#include <linux/smp.h>
 #include <linux/types.h>
 #include <asm/clint.h>
 #include <asm/csr.h>
 #include <asm/timex.h>
+#include <asm/smp.h>
 
 /*
  * This is the layout used by the SiFive clint, which is also shared by the qemu
@@ -21,6 +21,24 @@
 u32 __iomem *clint_ipi_base;
 
+static void clint_send_ipi(const struct cpumask *target)
+{
+        unsigned int cpu;
+
+        for_each_cpu(cpu, target)
+                writel(1, clint_ipi_base + cpuid_to_hartid_map(cpu));
+}
+
+static void clint_clear_ipi(void)
+{
+        writel(0, clint_ipi_base + cpuid_to_hartid_map(smp_processor_id()));
+}
+
+static struct riscv_ipi_ops clint_ipi_ops = {
+        .ipi_inject = clint_send_ipi,
+        .ipi_clear = clint_clear_ipi,
+};
+
 void clint_init_boot_cpu(void)
 {
         struct device_node *np;
@@ -40,5 +58,6 @@ void clint_init_boot_cpu(void)
         riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
         riscv_time_val = base + CLINT_TIME_VAL_OFF;
 
-        clint_clear_ipi(boot_cpu_hartid);
+        clint_clear_ipi();
+        riscv_set_ipi_ops(&clint_ipi_ops);
 }
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -547,6 +547,18 @@ static inline long sbi_get_firmware_version(void)
         return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }
 
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+        struct cpumask hartid_mask;
+
+        riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+        sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+        .ipi_inject = sbi_send_cpumask_ipi
+};
+
 int __init sbi_init(void)
 {
@@ -587,5 +599,7 @@ int __init sbi_init(void)
                 __sbi_rfence = __sbi_rfence_v01;
         }
 
+        riscv_set_ipi_ops(&sbi_ipi_ops);
+
         return 0;
 }
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -86,9 +86,25 @@ static void ipi_stop(void)
                 wait_for_interrupt();
 }
 
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+        ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+        if (ipi_ops && ipi_ops->ipi_clear)
+                ipi_ops->ipi_clear();
+
+        csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
 static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
 {
-        struct cpumask hartid_mask;
         int cpu;
 
         smp_mb__before_atomic();
@@ -96,33 +112,22 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
         set_bit(op, &ipi_data[cpu].bits);
         smp_mb__after_atomic();
 
-        riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
-        if (IS_ENABLED(CONFIG_RISCV_SBI))
-                sbi_send_ipi(cpumask_bits(&hartid_mask));
+        if (ipi_ops && ipi_ops->ipi_inject)
+                ipi_ops->ipi_inject(mask);
         else
-                clint_send_ipi_mask(mask);
+                pr_warn("SMP: IPI inject method not available\n");
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
 {
-        int hartid = cpuid_to_hartid_map(cpu);
-
         smp_mb__before_atomic();
         set_bit(op, &ipi_data[cpu].bits);
         smp_mb__after_atomic();
 
-        if (IS_ENABLED(CONFIG_RISCV_SBI))
-                sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+        if (ipi_ops && ipi_ops->ipi_inject)
+                ipi_ops->ipi_inject(cpumask_of(cpu));
         else
-                clint_send_ipi_single(hartid);
-}
-
-static inline void clear_ipi(void)
-{
-        if (IS_ENABLED(CONFIG_RISCV_SBI))
-                csr_clear(CSR_IP, IE_SIE);
-        else
-                clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+                pr_warn("SMP: IPI inject method not available\n");
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -140,7 +145,7 @@ void handle_IPI(struct pt_regs *regs)
         irq_enter();
 
-        clear_ipi();
+        riscv_clear_ipi();
 
         while (true) {
                 unsigned long ops;
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -147,8 +147,7 @@ asmlinkage __visible void smp_callin(void)
         struct mm_struct *mm = &init_mm;
         unsigned int curr_cpuid = smp_processor_id();
 
-        if (!IS_ENABLED(CONFIG_RISCV_SBI))
-                clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
+        riscv_clear_ipi();
 
         /* All kernel threads share the same mm context. */
         mmgrab(mm);