Commit 4f16345d authored by Palmer Dabbelt

Merge patch series "riscv: ASID-related and UP-related TLB flush enhancements"

Samuel Holland <samuel.holland@sifive.com> says:

This series converts uniprocessor kernel builds to use the same TLB
flushing code as SMP builds, to take advantage of batching and existing
range- and ASID-based TLB flush optimizations. It optimizes out IPIs and
SBI calls based on the online CPU count, which also covers the scenario
where SMP was enabled at build time but only one CPU is present/online.
A final optimization is to use single-ASID flushes wherever possible, to
avoid unnecessary TLB misses for kernel mappings.
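
The net effect on each remote-fence path can be seen in flush_icache_all() as changed in the diff below; it is condensed here with explanatory comments added (the code comes from the series, the comments do not):

void flush_icache_all(void)
{
        local_flush_icache_all();

        /* Only this CPU is online: the local fence above is enough. */
        if (num_online_cpus() < 2)
                return;
        else if (riscv_use_sbi_for_rfence())
                /* SBI rfence extension: no IPI context switch on the remote harts. */
                sbi_remote_fence_i(NULL);
        else
                /* Otherwise fall back to IPIs for the remote fences. */
                on_each_cpu(ipi_remote_fence_i, NULL, 1);
}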

This series has a semantic conflict with the AIA patches that are in
linux-next due to the removal of the third parameter of
riscv_ipi_set_virq_range(), which is called from imsic_ipi_domain_init()
in drivers/irqchip/irq-riscv-imsic-early.c. The resolution is to remove
the extra argument from the call site.
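
Concretely, the resolution amounts to dropping the boolean third argument at that call site; a minimal sketch only (the surrounding code and the name of the second argument in the AIA tree are assumed, not taken from this commit):

        /* drivers/irqchip/irq-riscv-imsic-early.c, in imsic_ipi_domain_init() */
-       riscv_ipi_set_virq_range(virq, nr_ipis, true);
+       riscv_ipi_set_virq_range(virq, nr_ipis);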

Here are some numbers from D1 which show the performance impact:

v6.9-rc1:
 System Benchmarks Partial Index              BASELINE       RESULT    INDEX
 Execl Throughput                                 43.0        198.5     46.2
 File Copy 1024 bufsize 2000 maxblocks          3960.0      73934.4    186.7
 File Copy 256 bufsize 500 maxblocks            1655.0      20242.6    122.3
 File Copy 4096 bufsize 8000 maxblocks          5800.0     197706.4    340.9
 Pipe Throughput                               12440.0     176974.2    142.3
 Pipe-based Context Switching                   4000.0      23626.8     59.1
 Process Creation                                126.0        449.9     35.7
 Shell Scripts (1 concurrent)                     42.4        544.4    128.4
 Shell Scripts (16 concurrent)                     ---         35.3      ---
 Shell Scripts (8 concurrent)                      6.0         71.6    119.3
 System Call Overhead                          15000.0     248072.6    165.4
                                                                    ========
 System Benchmarks Index Score (Partial Only)                          110.6

v6.9-rc1 + this patch series:
 System Benchmarks Partial Index              BASELINE       RESULT    INDEX
 Execl Throughput                                 43.0        196.8     45.8
 File Copy 1024 bufsize 2000 maxblocks          3960.0      71782.2    181.3
 File Copy 256 bufsize 500 maxblocks            1655.0      21269.4    128.5
 File Copy 4096 bufsize 8000 maxblocks          5800.0     199424.0    343.8
 Pipe Throughput                               12440.0     196468.6    157.9
 Pipe-based Context Switching                   4000.0      24261.8     60.7
 Process Creation                                126.0        459.0     36.4
 Shell Scripts (1 concurrent)                     42.4        543.8    128.2
 Shell Scripts (16 concurrent)                     ---         35.5      ---
 Shell Scripts (8 concurrent)                      6.0         71.7    119.6
 System Call Overhead                          15000.0     259415.2    172.9
                                                                    ========
 System Benchmarks Index Score (Partial Only)                          113.0

* b4-shazam-lts:
  riscv: mm: Always use an ASID to flush mm contexts
  riscv: mm: Preserve global TLB entries when switching contexts
  riscv: mm: Make asid_bits a local variable
  riscv: mm: Use a fixed layout for the MM context ID
  riscv: mm: Introduce cntx2asid/cntx2version helper macros
  riscv: Avoid TLB flush loops when affected by SiFive CIP-1200
  riscv: Apply SiFive CIP-1200 workaround to single-ASID sfence.vma
  riscv: mm: Combine the SMP and UP TLB flush code
  riscv: Only send remote fences when some other CPU is online
  riscv: mm: Broadcast kernel TLB flushes only when needed
  riscv: Use IPIs for remote cache/TLB flushes by default
  riscv: Factor out page table TLB synchronization
  riscv: Flush the instruction cache during SMP bringup

Link: https://lore.kernel.org/r/20240327045035.368512-1-samuel.holland@sifive.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 48b4fc66 daef1926
@@ -62,7 +62,7 @@ config RISCV
 	select ARCH_USE_MEMTEST
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USES_CFI_TRAPS if CFI_CLANG
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if MMU
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
...
@@ -42,6 +42,11 @@ static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long imp
 		return false;
 	if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626)
 		return false;
+
+#ifdef CONFIG_MMU
+	tlb_flush_all_threshold = 0;
+#endif
+
 	return true;
 }
...
@@ -43,11 +43,21 @@ ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
 	    CONFIG_ERRATA_SIFIVE_CIP_453)
 #else /* !__ASSEMBLY__ */
 
-#define ALT_FLUSH_TLB_PAGE(x)						\
+#define ALT_SFENCE_VMA_ASID(asid)					\
+asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
+		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
+		: : "r" (asid) : "memory")
+
+#define ALT_SFENCE_VMA_ADDR(addr)					\
 asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID,	\
 		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
 		: : "r" (addr) : "memory")
 
+#define ALT_SFENCE_VMA_ADDR_ASID(addr, asid)				\
+asm(ALTERNATIVE("sfence.vma %0, %1", "sfence.vma", SIFIVE_VENDOR_ID,	\
+		ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200)	\
+		: : "r" (addr), "r" (asid) : "memory")
+
 /*
  * _val is marked as "will be overwritten", so need to set it to 0
  * in the default case.
...
@@ -28,6 +28,9 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#define cntx2asid(cntx)		((cntx) & SATP_ASID_MASK)
+#define cntx2version(cntx)	((cntx) & ~SATP_ASID_MASK)
+
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
 			       phys_addr_t sz, pgprot_t prot);
 #endif /* __ASSEMBLY__ */
...
@@ -8,6 +8,7 @@
 #define _ASM_RISCV_PGALLOC_H
 
 #include <linux/mm.h>
+#include <asm/sbi.h>
 #include <asm/tlb.h>
 
 #ifdef CONFIG_MMU
@@ -15,6 +16,14 @@
 #define __HAVE_ARCH_PUD_FREE
 #include <asm-generic/pgalloc.h>
 
+static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+	if (riscv_use_sbi_for_rfence())
+		tlb_remove_ptdesc(tlb, pt);
+	else
+		tlb_remove_page_ptdesc(tlb, pt);
+}
+
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
 {
@@ -102,10 +111,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 		struct ptdesc *ptdesc = virt_to_ptdesc(pud);
 
 		pagetable_pud_dtor(ptdesc);
-		if (riscv_use_ipi_for_rfence())
-			tlb_remove_page_ptdesc(tlb, ptdesc);
-		else
-			tlb_remove_ptdesc(tlb, ptdesc);
+		riscv_tlb_remove_ptdesc(tlb, ptdesc);
 	}
 }
 
@@ -139,12 +145,8 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long addr)
 {
-	if (pgtable_l5_enabled) {
-		if (riscv_use_ipi_for_rfence())
-			tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
-		else
-			tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
-	}
+	if (pgtable_l5_enabled)
+		riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
@@ -176,10 +178,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 		struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
 
 		pagetable_pmd_dtor(ptdesc);
-		if (riscv_use_ipi_for_rfence())
-			tlb_remove_page_ptdesc(tlb, ptdesc);
-		else
-			tlb_remove_ptdesc(tlb, ptdesc);
+		riscv_tlb_remove_ptdesc(tlb, ptdesc);
 	}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
@@ -190,10 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	struct ptdesc *ptdesc = page_ptdesc(pte);
 
 	pagetable_pte_dtor(ptdesc);
-	if (riscv_use_ipi_for_rfence())
-		tlb_remove_page_ptdesc(tlb, ptdesc);
-	else
-		tlb_remove_ptdesc(tlb, ptdesc);
+	riscv_tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif /* CONFIG_MMU */
...
@@ -375,8 +375,12 @@ unsigned long riscv_cached_marchid(unsigned int cpu_id);
 unsigned long riscv_cached_mimpid(unsigned int cpu_id);
 
 #if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+DECLARE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+#define riscv_use_sbi_for_rfence() \
+	static_branch_unlikely(&riscv_sbi_for_rfence)
 void sbi_ipi_init(void);
 #else
+static inline bool riscv_use_sbi_for_rfence(void) { return false; }
 static inline void sbi_ipi_init(void) { }
 #endif
...
@@ -49,12 +49,7 @@ void riscv_ipi_disable(void);
 bool riscv_ipi_have_virq_range(void);
 
 /* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
-
-/* Check if we can use IPIs for remote FENCEs */
-DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-#define riscv_use_ipi_for_rfence() \
-	static_branch_unlikely(&riscv_ipi_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr);
 
 /* Check other CPUs stop or not */
 bool smp_crash_stop_failed(void);
@@ -104,16 +99,10 @@ static inline bool riscv_ipi_have_virq_range(void)
 	return false;
 }
 
-static inline void riscv_ipi_set_virq_range(int virq, int nr,
-					    bool use_for_rfence)
+static inline void riscv_ipi_set_virq_range(int virq, int nr)
 {
 }
 
-static inline bool riscv_use_ipi_for_rfence(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
...
@@ -15,24 +15,34 @@
 #define FLUSH_TLB_NO_ASID	((unsigned long)-1)
 
 #ifdef CONFIG_MMU
-extern unsigned long asid_mask;
-
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
 }
 
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ASID(asid);
+	else
+		local_flush_tlb_all();
+}
+
 /* Flush one page from local TLB */
 static inline void local_flush_tlb_page(unsigned long addr)
 {
-	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+	ALT_SFENCE_VMA_ADDR(addr);
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+					     unsigned long asid)
+{
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
+	else
+		local_flush_tlb_page(addr);
 }
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all()			do { } while (0)
-#define local_flush_tlb_page(addr)		do { } while (0)
-#endif /* CONFIG_MMU */
 
-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -55,27 +65,9 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
 void arch_flush_tlb_batched_pending(struct mm_struct *mm);
 void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
-#else /* CONFIG_SMP && CONFIG_MMU */
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
-#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
+extern unsigned long tlb_flush_all_threshold;
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
...
@@ -13,6 +13,9 @@
 #include <linux/irqdomain.h>
 #include <asm/sbi.h>
 
+DEFINE_STATIC_KEY_FALSE(riscv_sbi_for_rfence);
+EXPORT_SYMBOL_GPL(riscv_sbi_for_rfence);
+
 static int sbi_ipi_virq;
 
 static void sbi_ipi_handle(struct irq_desc *desc)
@@ -72,6 +75,12 @@ void __init sbi_ipi_init(void)
 			  "irqchip/sbi-ipi:starting",
 			  sbi_ipi_starting_cpu, NULL);
 
-	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
+	riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
 	pr_info("providing IPIs using SBI IPI extension\n");
+
+	/*
+	 * Use the SBI remote fence extension to avoid
+	 * the extra context switch needed to handle IPIs.
+	 */
+	static_branch_enable(&riscv_sbi_for_rfence);
 }
...
@@ -171,10 +171,7 @@ bool riscv_ipi_have_virq_range(void)
 	return (ipi_virq_base) ? true : false;
 }
 
-DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
-EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence);
-
-void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
+void riscv_ipi_set_virq_range(int virq, int nr)
 {
 	int i, err;
 
@@ -197,12 +194,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
 
 	/* Enabled IPIs for boot CPU immediately */
 	riscv_ipi_enable();
-
-	/* Update RFENCE static key */
-	if (use_for_rfence)
-		static_branch_enable(&riscv_ipi_for_rfence);
-	else
-		static_branch_disable(&riscv_ipi_for_rfence);
 }
 
 static const char * const ipi_names[] = {
...
@@ -26,7 +26,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
 
-#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>
@@ -234,9 +234,10 @@ asmlinkage __visible void smp_callin(void)
 	riscv_user_isa_enable();
 
 	/*
-	 * Remote TLB flushes are ignored while the CPU is offline, so emit
-	 * a local TLB flush right now just in case.
+	 * Remote cache and TLB flushes are ignored while the CPU is offline,
+	 * so flush them both right now just in case.
 	 */
+	local_flush_icache_all();
 	local_flush_tlb_all();
 	complete(&cpu_running);
 
 	/*
...
@@ -13,14 +13,11 @@ endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o tlbflush.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += pmem.o
-
-ifeq ($(CONFIG_MMU),y)
-obj-$(CONFIG_SMP) += tlbflush.o
-endif
-
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
 obj-$(CONFIG_KASAN) += kasan_init.o
...
@@ -22,7 +22,9 @@ void flush_icache_all(void)
 {
 	local_flush_icache_all();
 
-	if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
+	if (num_online_cpus() < 2)
+		return;
+	else if (riscv_use_sbi_for_rfence())
 		sbi_remote_fence_i(NULL);
 	else
 		on_each_cpu(ipi_remote_fence_i, NULL, 1);
@@ -70,8 +72,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 		 * with flush_icache_deferred().
 		 */
 		smp_mb();
-	} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
-		   !riscv_use_ipi_for_rfence()) {
+	} else if (riscv_use_sbi_for_rfence()) {
 		sbi_remote_fence_i(&others);
 	} else {
 		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
...
@@ -21,9 +21,7 @@
 
 DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
-static unsigned long asid_bits;
 static unsigned long num_asids;
-unsigned long asid_mask;
 
 static atomic_long_t current_version;
 
@@ -82,7 +80,7 @@ static void __flush_context(void)
 		if (cntx == 0)
 			cntx = per_cpu(reserved_context, i);
 
-		__set_bit(cntx & asid_mask, context_asid_map);
+		__set_bit(cntx2asid(cntx), context_asid_map);
 		per_cpu(reserved_context, i) = cntx;
 	}
 
@@ -103,7 +101,7 @@ static unsigned long __new_context(struct mm_struct *mm)
 	lockdep_assert_held(&context_lock);
 
 	if (cntx != 0) {
-		unsigned long newcntx = ver | (cntx & asid_mask);
+		unsigned long newcntx = ver | cntx2asid(cntx);
 
 		/*
 		 * If our current CONTEXT was active during a rollover, we
@@ -116,7 +114,7 @@ static unsigned long __new_context(struct mm_struct *mm)
 		 * We had a valid CONTEXT in a previous life, so try to
 		 * re-use it if possible.
 		 */
-		if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
+		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
 			return newcntx;
 	}
 
@@ -129,7 +127,7 @@ static unsigned long __new_context(struct mm_struct *mm)
 		goto set_asid;
 
 	/* We're out of ASIDs, so increment current_version */
-	ver = atomic_long_add_return_relaxed(num_asids, &current_version);
+	ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);
 
 	/* Flush everything */
 	__flush_context();
@@ -169,7 +167,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 	 */
 	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
 	if (old_active_cntx &&
-	    ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
+	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
 	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
 					old_active_cntx, cntx))
 		goto switch_mm_fast;
@@ -178,7 +176,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 	/* Check that our ASID belongs to the current_version. */
 	cntx = atomic_long_read(&mm->context.id);
-	if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
+	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
 		cntx = __new_context(mm);
 		atomic_long_set(&mm->context.id, cntx);
 	}
@@ -192,7 +190,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 switch_mm_fast:
 	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
-		  ((cntx & asid_mask) << SATP_ASID_SHIFT) |
+		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
 		  satp_mode);
 
 	if (need_flush_tlb)
@@ -203,7 +201,7 @@ static void set_mm_noasid(struct mm_struct *mm)
 {
 	/* Switch the page table and blindly nuke entire local TLB */
 	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
-	local_flush_tlb_all();
+	local_flush_tlb_all_asid(0);
 }
 
 static inline void set_mm(struct mm_struct *prev,
@@ -228,7 +226,7 @@ static inline void set_mm(struct mm_struct *prev,
 
 static int __init asids_init(void)
 {
-	unsigned long old;
+	unsigned long asid_bits, old;
 
 	/* Figure-out number of ASID bits in HW */
 	old = csr_read(CSR_SATP);
@@ -248,7 +246,6 @@ static int __init asids_init(void)
 	/* Pre-compute ASID details */
 	if (asid_bits) {
 		num_asids = 1 << asid_bits;
-		asid_mask = num_asids - 1;
 	}
 
 	/*
@@ -256,7 +253,7 @@ static int __init asids_init(void)
 	 * at-least twice more than CPUs
 	 */
 	if (num_asids > (2 * num_possible_cpus())) {
-		atomic_long_set(&current_version, num_asids);
+		atomic_long_set(&current_version, BIT(SATP_ASID_BITS));
 
 		context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
 		if (!context_asid_map)
...
@@ -7,34 +7,11 @@
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
-	if (asid != FLUSH_TLB_NO_ASID)
-		__asm__ __volatile__ ("sfence.vma x0, %0"
-				:
-				: "r" (asid)
-				: "memory");
-	else
-		local_flush_tlb_all();
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
-		unsigned long asid)
-{
-	if (asid != FLUSH_TLB_NO_ASID)
-		__asm__ __volatile__ ("sfence.vma %0, %1"
-				:
-				: "r" (addr), "r" (asid)
-				: "memory");
-	else
-		local_flush_tlb_page(addr);
-}
-
 /*
  * Flush entire TLB if number of entries to be flushed is greater
  * than the threshold below.
  */
-static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+unsigned long tlb_flush_all_threshold __read_mostly = 64;
 
 static void local_flush_tlb_range_threshold_asid(unsigned long start,
 						 unsigned long size,
@@ -79,10 +56,12 @@ static void __ipi_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	if (riscv_use_ipi_for_rfence())
-		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
+	if (num_online_cpus() < 2)
+		local_flush_tlb_all();
+	else if (riscv_use_sbi_for_rfence())
 		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
+	else
+		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
 }
 
 struct flush_tlb_range_data {
@@ -103,46 +82,34 @@ static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
 			      unsigned long start, unsigned long size,
 			      unsigned long stride)
 {
-	struct flush_tlb_range_data ftd;
-	bool broadcast;
+	unsigned int cpu;
 
 	if (cpumask_empty(cmask))
 		return;
 
-	if (cmask != cpu_online_mask) {
-		unsigned int cpuid;
-
-		cpuid = get_cpu();
-		/* check if the tlbflush needs to be sent to other CPUs */
-		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+	cpu = get_cpu();
+
+	/* Check if the TLB flush needs to be sent to other CPUs. */
+	if (cpumask_any_but(cmask, cpu) >= nr_cpu_ids) {
+		local_flush_tlb_range_asid(start, size, stride, asid);
+	} else if (riscv_use_sbi_for_rfence()) {
+		sbi_remote_sfence_vma_asid(cmask, start, size, asid);
 	} else {
-		broadcast = true;
-	}
-
-	if (broadcast) {
-		if (riscv_use_ipi_for_rfence()) {
-			ftd.asid = asid;
-			ftd.start = start;
-			ftd.size = size;
-			ftd.stride = stride;
-			on_each_cpu_mask(cmask,
-					 __ipi_flush_tlb_range_asid,
-					 &ftd, 1);
-		} else
-			sbi_remote_sfence_vma_asid(cmask,
-						   start, size, asid);
-	} else {
-		local_flush_tlb_range_asid(start, size, stride, asid);
+		struct flush_tlb_range_data ftd;
+
+		ftd.asid = asid;
+		ftd.start = start;
+		ftd.size = size;
+		ftd.stride = stride;
+		on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
 	}
 
-	if (cmask != cpu_online_mask)
-		put_cpu();
+	put_cpu();
 }
 
 static inline unsigned long get_mm_asid(struct mm_struct *mm)
 {
-	return static_branch_unlikely(&use_asid_allocator) ?
-			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+	return cntx2asid(atomic_long_read(&mm->context.id));
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
...
@@ -251,7 +251,7 @@ static int __init clint_timer_init_dt(struct device_node *np)
 	}
 
 	irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
-	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+	riscv_ipi_set_virq_range(rc, BITS_PER_BYTE);
 	clint_clear_ipi();
 #endif
...