Commit 9ed18b0b authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.8-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - fix missing TLB flush during early boot on SPARSEMEM_VMEMMAP
   configurations

 - fixes to correctly implement the break-before-make behavior required
   by the ISA for NAPOT mappings

 - fix a missing TLB flush on intermediate mapping changes

 - fix build warning about a missing declaration of overflow_stack

 - fix performance regression related to incorrect tracking of completed
   batch TLB flushes

* tag 'riscv-for-linus-6.8-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fix arch_tlbbatch_flush() by clearing the batch cpumask
  riscv: declare overflow_stack as exported from traps.c
  riscv: Fix arch_hugetlb_migration_supported() for NAPOT
  riscv: Flush the tlb when a page directory is freed
  riscv: Fix hugetlb_mask_last_page() when NAPOT is enabled
  riscv: Fix set_huge_pte_at() for NAPOT mapping
  riscv: mm: execute local TLB flush after populating vmemmap
parents ca8a6673 3951f6ad
@@ -11,6 +11,9 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 }
 #define arch_clear_hugepage_flags arch_clear_hugepage_flags
 
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
...
@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
         return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
 
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
 #endif /* _ASM_RISCV_STACKTRACE_H */
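The warning named in the commit message comes from traps.c defining overflow_stack with no declaration in scope; the hunk above adds that declaration to the header. A standalone toy reproduction of the same warning class (not the kernel build; the file name and identifiers here are illustrative), compiled with clang's -Wmissing-variable-declarations:

/* repro.c -- compile with: clang -Wmissing-variable-declarations -c repro.c */
unsigned long undeclared_stack[64];        /* warning: no previous declaration */

extern unsigned long declared_stack[64];   /* what the header provides above */
unsigned long declared_stack[64];          /* no warning */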
@@ -16,7 +16,7 @@ static void tlb_flush(struct mmu_gather *tlb);
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 #ifdef CONFIG_MMU
-        if (tlb->fullmm || tlb->need_flush_all)
+        if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
                 flush_tlb_mm(tlb->mm);
         else
                 flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
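tlb->freed_tables is set by the generic mmu_gather code when page-table pages themselves were freed, and a ranged flush of leaf entries is then not enough: the hart may still cache intermediate (non-leaf) translations. A compact userspace model of the decision above (the struct here only mirrors the relevant mmu_gather fields; it is not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct mmu_gather_model {
        bool fullmm;            /* tearing down the whole address space */
        bool need_flush_all;
        bool freed_tables;      /* a page directory was freed (this fix) */
};

static bool needs_full_mm_flush(const struct mmu_gather_model *tlb)
{
        /* Any of the three conditions invalidates more than a leaf range. */
        return tlb->fullmm || tlb->need_flush_all || tlb->freed_tables;
}

int main(void)
{
        struct mmu_gather_model tlb = { .freed_tables = true };
        printf("full flush needed: %d\n", needs_full_mm_flush(&tlb));
        return 0;
}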
...
@@ -75,6 +75,7 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_mm(mm) flush_tlb_all()
 #define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
         return pte;
 }
 
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+        unsigned long hp_size = huge_page_size(h);
+
+        switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+        case PUD_SIZE:
+                return P4D_SIZE - PUD_SIZE;
+#endif
+        case PMD_SIZE:
+                return PUD_SIZE - PMD_SIZE;
+        case napot_cont_size(NAPOT_CONT64KB_ORDER):
+                return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+        default:
+                break;
+        }
+
+        return 0UL;
+}
+
 static pte_t get_clear_contig(struct mm_struct *mm,
                               unsigned long addr,
                               pte_t *ptep,
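Roughly, hugetlb_mask_last_page() tells generic hugetlb code how far a linear scan may skip after handling the last page mapped by one upper-level entry; the new case lets 64 KiB NAPOT mappings skip ahead to the next PMD entry. A userspace sketch with Sv39-style sizes hard-coded (the constants are assumptions for illustration):

#include <stdio.h>

#define PMD_SIZE        (1UL << 21)     /* 2 MiB, assumed */
#define PUD_SIZE        (1UL << 30)     /* 1 GiB, assumed */
#define NAPOT_64KB      (1UL << 16)     /* 64 KiB NAPOT contiguous size */

static unsigned long mask_last_page(unsigned long hp_size)
{
        if (hp_size == PMD_SIZE)
                return PUD_SIZE - PMD_SIZE;
        if (hp_size == NAPOT_64KB)      /* the case the hunk adds */
                return PMD_SIZE - NAPOT_64KB;
        return 0;                       /* no skipping possible */
}

int main(void)
{
        printf("64 KiB NAPOT skip mask: %#lx\n", mask_last_page(NAPOT_64KB));
        printf("2 MiB PMD skip mask:    %#lx\n", mask_last_page(PMD_SIZE));
        return 0;
}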
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
         return entry;
 }
 
+static void clear_flush(struct mm_struct *mm,
+                        unsigned long addr,
+                        pte_t *ptep,
+                        unsigned long pgsize,
+                        unsigned long ncontig)
+{
+        struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+        unsigned long i, saddr = addr;
+
+        for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+                ptep_get_and_clear(mm, addr, ptep);
+
+        flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
 void set_huge_pte_at(struct mm_struct *mm,
                      unsigned long addr,
                      pte_t *ptep,
                      pte_t pte,
                      unsigned long sz)
 {
-        unsigned long hugepage_shift;
+        unsigned long hugepage_shift, pgsize;
         int i, pte_num;
 
         if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
                 hugepage_shift = PAGE_SHIFT;
 
         pte_num = sz >> hugepage_shift;
-        for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+        pgsize = 1 << hugepage_shift;
+
+        if (!pte_present(pte)) {
+                for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+                        set_ptes(mm, addr, ptep, pte, 1);
+                return;
+        }
+
+        if (!pte_napot(pte)) {
+                set_ptes(mm, addr, ptep, pte, 1);
+                return;
+        }
+
+        clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+        for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
                 set_pte_at(mm, addr, ptep, pte);
 }
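The quoted privileged-specification language reduces to a strict ordering that clear_flush() and the final loop implement: invalidate every PTE of the NAPOT group, flush the TLB, and only then install the new PTEs. A toy userspace model of that ordering (names and the flush stub are illustrative, not kernel API):

#include <stdio.h>

enum { PTE_NUM = 16 };          /* e.g. a 64 KiB NAPOT group of 4 KiB PTEs */
static unsigned long ptes[PTE_NUM];

static void flush_group(void)
{
        puts("sfence.vma covering every 4 KiB region of the group");
}

static void set_napot_group(unsigned long new_pte)
{
        int i;

        for (i = 0; i < PTE_NUM; i++)   /* break: mark all PTEs invalid */
                ptes[i] = 0;
        flush_group();                  /* flush: no stale translations */
        for (i = 0; i < PTE_NUM; i++)   /* make: install the new mapping */
                ptes[i] = new_pte;
}

int main(void)
{
        set_napot_group(0x1234);
        printf("ptes[0] = %#lx\n", ptes[0]);
        return 0;
}

In the hunk above, clear_flush() performs the break and flush steps; the trailing set_pte_at() loop is the make.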
@@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
         pte_clear(mm, addr, ptep);
 }
 
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
 {
         unsigned long order;
@@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
 #else
 
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
 {
         return false;
 }
@@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
         return pmd_leaf(pmd);
 }
 
-bool __init arch_hugetlb_valid_size(unsigned long size)
+static bool __hugetlb_valid_size(unsigned long size)
 {
         if (size == HPAGE_SIZE)
                 return true;
@@ -363,6 +421,16 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
         return false;
 }
 
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+        return __hugetlb_valid_size(size);
+}
+
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+        return __hugetlb_valid_size(huge_page_size(h));
+}
+
 #ifdef CONFIG_CONTIG_ALLOC
 static __init int gigantic_pages_init(void)
 {
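The net effect of the refactor is that size validation and migration support now share one predicate, so a hugepage size is reported migratable only if it is one the architecture really accepts (in particular, NAPOT sizes are rejected when Svnapot is absent). A minimal model of that sharing (the sizes and the napot_enabled flag are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define HPAGE_SIZE      (1UL << 21)     /* assumed 2 MiB base hugepage */
#define NAPOT_64KB      (1UL << 16)     /* assumed NAPOT size */

static bool napot_enabled = true;       /* stands in for Svnapot detection */

static bool hugetlb_valid_size(unsigned long size)
{
        return size == HPAGE_SIZE || (napot_enabled && size == NAPOT_64KB);
}

static bool migration_supported(unsigned long hstate_size)
{
        /* Same predicate as the size check, per the hunk above. */
        return hugetlb_valid_size(hstate_size);
}

int main(void)
{
        printf("migrate 64 KiB NAPOT: %d\n", migration_supported(NAPOT_64KB));
        printf("migrate 8 KiB:        %d\n", migration_supported(1UL << 13));
        return 0;
}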
...
@@ -1385,6 +1385,10 @@ void __init misc_mem_init(void)
         early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
         arch_numa_init();
         sparse_init();
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+        /* The entire VMEMMAP region has been populated. Flush TLB for this region */
+        local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
+#endif
         zone_sizes_init();
         arch_reserve_crashkernel();
         memblock_dump_all();
...
@@ -66,6 +66,7 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
         local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
+/* Flush a range of kernel pages without broadcasting */
 void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
         local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
@@ -233,4 +234,5 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
         __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
                           FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+        cpumask_clear(&batch->cpumask);
 }
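The one-line cpumask_clear() matters because the batch object is reused across unmap cycles: without it the mask only ever grows, and every later arch_tlbbatch_flush() targets the union of all CPUs ever recorded, which is the performance regression named in the merge description. A toy model of the reuse pattern (a plain bitmask stands in for struct cpumask; not kernel code):

#include <stdio.h>

struct flush_batch {
        unsigned long cpumask;  /* one bit per CPU, toy version */
};

static void batch_add_cpu(struct flush_batch *b, int cpu)
{
        b->cpumask |= 1UL << cpu;
}

static void batch_flush(struct flush_batch *b)
{
        printf("flushing CPUs %#lx\n", b->cpumask);
        b->cpumask = 0; /* the fix: the next batch starts empty */
}

int main(void)
{
        struct flush_batch b = { 0 };

        batch_add_cpu(&b, 1);
        batch_flush(&b);        /* flushes only CPU 1 */
        batch_add_cpu(&b, 3);
        batch_flush(&b);        /* without the clear: CPUs 1 and 3 */
        return 0;
}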