Commit ba6f3596 authored by Alexandre Ghiti, committed by Palmer Dabbelt

riscv: Make __flush_tlb_range() loop over pte instead of flushing the whole tlb

Currently, when the range to flush covers more than one page (a 4K page or
a hugepage), __flush_tlb_range() flushes the whole tlb. Flushing the whole
tlb comes with a greater cost than flushing a single entry, so we should
flush single entries up to a certain threshold so that:
threshold * (cost of flushing a single entry) < cost of flushing the whole tlb.
Co-developed-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Signed-off-by: Mayuresh Chitale <mchitale@ventanamicro.com>
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Samuel Holland <samuel.holland@sifive.com>
Link: https://lore.kernel.org/r/20231030133027.19542-4-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 9e113064
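In short: below the threshold, N targeted sfence.vma instructions beat one global flush; above it, the global flush wins. For example, with the default threshold of 64 and a 4K stride, a 128K range (32 entries) is flushed entry by entry, while a 1M range (256 entries) falls back to a full flush. The new helper introduced by the patch (in arch/riscv/mm/tlbflush.c, see the diff below) implements exactly this; reproduced here with explanatory comments added:

```c
/*
 * Flush entire TLB if the number of entries to be flushed is greater
 * than the threshold below (the value 64 and all names come from the
 * patch; only the comments are editorial).
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	/* Number of TLB entries covered by [start, start + size). */
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	/* Past the break-even point, one full flush is cheaper. */
	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	/* Otherwise, issue one targeted sfence.vma per entry. */
	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}
```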
arch/riscv/include/asm/sbi.h
@@ -273,9 +273,6 @@ void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_send_ipi(unsigned int cpu);
 int sbi_remote_fence_i(const struct cpumask *cpu_mask);
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
-			  unsigned long start,
-			  unsigned long size);
 int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
 			       unsigned long start,
arch/riscv/include/asm/tlbflush.h
@@ -11,6 +11,9 @@
 #include <asm/smp.h>
 #include <asm/errata_list.h>
 
+#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID	((unsigned long)-1)
+
 #ifdef CONFIG_MMU
 extern unsigned long asid_mask;
arch/riscv/kernel/sbi.c
@@ -11,6 +11,7 @@
 #include <linux/reboot.h>
 #include <asm/sbi.h>
 #include <asm/smp.h>
+#include <asm/tlbflush.h>
 
 /* default SBI version is 0.1 */
 unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
@@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
 }
 EXPORT_SYMBOL(sbi_remote_fence_i);
 
-/**
- * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
- *			     harts for the specified virtual address range.
- * @cpu_mask: A cpu mask containing all the target harts.
- * @start: Start of the virtual address
- * @size: Total size of the virtual address range.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
- */
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
-			  unsigned long start,
-			  unsigned long size)
-{
-	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
-			    cpu_mask, start, size, 0, 0);
-}
-EXPORT_SYMBOL(sbi_remote_sfence_vma);
-
 /**
  * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
- * remote harts for a virtual address range belonging to a specific ASID.
+ * remote harts for a virtual address range belonging to a specific ASID or not.
  *
  * @cpu_mask: A cpu mask containing all the target harts.
  * @start: Start of the virtual address
  * @size: Total size of the virtual address range.
- * @asid: The value of address space identifier (ASID).
+ * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
+ *	  for flushing all address spaces.
  *
  * Return: 0 on success, appropriate linux error code otherwise.
  */
@@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
 			       unsigned long size,
 			       unsigned long asid)
 {
-	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
-			    cpu_mask, start, size, asid, 0);
+	if (asid == FLUSH_TLB_NO_ASID)
+		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+				    cpu_mask, start, size, 0, 0);
+	else
+		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+				    cpu_mask, start, size, asid, 0);
 }
 EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
arch/riscv/mm/tlbflush.c
@@ -9,28 +9,50 @@
 static inline void local_flush_tlb_all_asid(unsigned long asid)
 {
-	__asm__ __volatile__ ("sfence.vma x0, %0"
-			:
-			: "r" (asid)
-			: "memory");
+	if (asid != FLUSH_TLB_NO_ASID)
+		__asm__ __volatile__ ("sfence.vma x0, %0"
+				:
+				: "r" (asid)
+				: "memory");
+	else
+		local_flush_tlb_all();
 }
 
 static inline void local_flush_tlb_page_asid(unsigned long addr,
 		unsigned long asid)
 {
-	__asm__ __volatile__ ("sfence.vma %0, %1"
-			:
-			: "r" (addr), "r" (asid)
-			: "memory");
+	if (asid != FLUSH_TLB_NO_ASID)
+		__asm__ __volatile__ ("sfence.vma %0, %1"
+				:
+				: "r" (addr), "r" (asid)
+				: "memory");
+	else
+		local_flush_tlb_page(addr);
 }
 
-static inline void local_flush_tlb_range(unsigned long start,
-		unsigned long size, unsigned long stride)
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below.
+ */
+static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+
+static void local_flush_tlb_range_threshold_asid(unsigned long start,
+						 unsigned long size,
+						 unsigned long stride,
+						 unsigned long asid)
 {
-	if (size <= stride)
-		local_flush_tlb_page(start);
-	else
-		local_flush_tlb_all();
+	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
+	int i;
+
+	if (nr_ptes_in_range > tlb_flush_all_threshold) {
+		local_flush_tlb_all_asid(asid);
+		return;
+	}
+
+	for (i = 0; i < nr_ptes_in_range; ++i) {
+		local_flush_tlb_page_asid(start, asid);
+		start += stride;
+	}
 }
 
 static inline void local_flush_tlb_range_asid(unsigned long start,
@@ -38,8 +60,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 {
 	if (size <= stride)
 		local_flush_tlb_page_asid(start, asid);
-	else
+	else if (size == FLUSH_TLB_MAX_SIZE)
 		local_flush_tlb_all_asid(asid);
+	else
+		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
 static void __ipi_flush_tlb_all(void *info)
@@ -52,7 +76,7 @@ void flush_tlb_all(void)
 	if (riscv_use_ipi_for_rfence())
 		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
 	else
-		sbi_remote_sfence_vma(NULL, 0, -1);
+		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
 }
 
 struct flush_tlb_range_data {
@@ -69,18 +93,12 @@ static void __ipi_flush_tlb_range_asid(void *info)
 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __ipi_flush_tlb_range(void *info)
-{
-	struct flush_tlb_range_data *d = info;
-
-	local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
 static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long size, unsigned long stride)
 {
 	struct flush_tlb_range_data ftd;
 	struct cpumask *cmask = mm_cpumask(mm);
+	unsigned long asid = FLUSH_TLB_NO_ASID;
 	unsigned int cpuid;
 	bool broadcast;
@@ -90,39 +108,24 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 	cpuid = get_cpu();
 	/* check if the tlbflush needs to be sent to other CPUs */
 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
-
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = asid;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range_asid,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma_asid(cmask,
-							   start, size, asid);
-		} else {
-			local_flush_tlb_range_asid(start, size, stride, asid);
-		}
+
+	if (static_branch_unlikely(&use_asid_allocator))
+		asid = atomic_long_read(&mm->context.id) & asid_mask;
+
+	if (broadcast) {
+		if (riscv_use_ipi_for_rfence()) {
+			ftd.asid = asid;
+			ftd.start = start;
+			ftd.size = size;
+			ftd.stride = stride;
+			on_each_cpu_mask(cmask,
+					 __ipi_flush_tlb_range_asid,
+					 &ftd, 1);
+		} else
+			sbi_remote_sfence_vma_asid(cmask,
+						   start, size, asid);
 	} else {
-		if (broadcast) {
-			if (riscv_use_ipi_for_rfence()) {
-				ftd.asid = 0;
-				ftd.start = start;
-				ftd.size = size;
-				ftd.stride = stride;
-				on_each_cpu_mask(cmask,
-						 __ipi_flush_tlb_range,
-						 &ftd, 1);
-			} else
-				sbi_remote_sfence_vma(cmask, start, size);
-		} else {
-			local_flush_tlb_range(start, size, stride);
-		}
+		local_flush_tlb_range_asid(start, size, stride, asid);
 	}
 
 	put_cpu();
@@ -130,7 +133,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
+	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
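For reference, the two sentinels tie the pieces together: FLUSH_TLB_MAX_SIZE means "flush the entire address range" and FLUSH_TLB_NO_ASID means "flush regardless of address space", which lets the former non-ASID paths funnel through the ASID helpers. The resulting local dispatch, restated from the tlbflush.c hunks above with annotations added:

```c
/* Both sentinels are (unsigned long)-1; names and logic are from the patch. */
#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
#define FLUSH_TLB_NO_ASID	((unsigned long)-1)

static inline void local_flush_tlb_range_asid(unsigned long start,
					      unsigned long size,
					      unsigned long stride,
					      unsigned long asid)
{
	if (size <= stride)			/* range fits in one entry */
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)	/* explicit full-flush request */
		local_flush_tlb_all_asid(asid);
	else					/* bounded range: loop or fall back */
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
```

Callers then express intent through the sentinels alone: flush_tlb_mm() passes FLUSH_TLB_MAX_SIZE, and flush_tlb_all() passes both sentinels to the consolidated sbi_remote_sfence_vma_asid() call.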