Commit 665f6402 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

sparc32: implement the new page table range API

Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().
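
For context, the generic set_ptes() that this series adds to include/linux/pgtable.h looks roughly like the sketch below (lazy-MMU and page_table_check hooks elided); it advances the PFN by one page per iteration, which is why each architecture must now define PFN_PTE_SHIFT and can drop its private set_pte_at():

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* Step to the next page frame; the PFN field starts at
		 * bit PFN_PTE_SHIFT of the PTE. */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

With set_pte_at() now a generic wrapper, the sparc32-private definition below is removed and ptep_set_access_flags() switches to calling set_pte() directly.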

Link: https://lkml.kernel.org/r/20230802151406.3735276-26-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 157efa29
arch/sparc/include/asm/cacheflush_32.h
@@ -2,6 +2,7 @@
 #ifndef _SPARC_CACHEFLUSH_H
 #define _SPARC_CACHEFLUSH_H
 
+#include <linux/page-flags.h>
 #include <asm/cachetlb_32.h>
 
 #define flush_cache_all() \
@@ -16,6 +17,7 @@
 	sparc32_cachetlb_ops->cache_page(vma, addr)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma, pg)		do { } while (0)
+#define flush_icache_pages(vma, pg, nr)		do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
@@ -35,11 +37,15 @@
 #define flush_page_for_dma(addr) \
 	sparc32_cachetlb_ops->page_for_dma(addr)
 
-struct page;
 void sparc_flush_page_to_ram(struct page *page);
+void sparc_flush_folio_to_ram(struct folio *folio);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-#define flush_dcache_page(page)			sparc_flush_page_to_ram(page)
+#define flush_dcache_folio(folio)		sparc_flush_folio_to_ram(folio)
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
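A minimal usage sketch (example_flush() is an invented name for illustration): the single-page entry point keeps its old behaviour by routing through the folio path, while one flush_dcache_folio() call now covers every subpage of a large folio.

/* Illustration only: example_flush() is not part of the commit. */
static void example_flush(struct page *page)
{
	/* For an order-0 folio this is exactly one __flush_page_to_ram(),
	 * the same effect as the old flush_dcache_page() macro. */
	flush_dcache_folio(page_folio(page));
}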
arch/sparc/include/asm/pgtable_32.h
@@ -101,8 +101,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
 }
 
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
 static inline int srmmu_device_memory(unsigned long x)
 {
 	return ((x & 0xF0000000) != 0);
@@ -256,6 +254,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 	return __pte(pte_val(pte) | SRMMU_REF);
 }
 
+#define PFN_PTE_SHIFT			(PAGE_SHIFT - 4)
 #define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)
 
 static inline unsigned long pte_pfn(pte_t pte)
@@ -268,7 +267,7 @@ static inline unsigned long pte_pfn(pte_t pte)
 		 */
 		return ~0UL;
 	}
-	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
+	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
 }
 
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
@@ -318,6 +317,7 @@ void mmu_info(struct seq_file *m);
 #define FAULT_CODE_USER     0x4
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
+#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)
 
 void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
 		      unsigned long xva, unsigned int len);
@@ -422,7 +422,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 ({									  \
 	int __changed = !pte_same(*(__ptep), __entry);			  \
 	if (__changed) {						  \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		set_pte(__ptep, __entry);				  \
 		flush_tlb_page(__vma, __address);			  \
 	}								  \
 	__changed;							  \
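As a sanity check on the new constant (assuming the usual sparc32 PAGE_SHIFT of 12, and the SRMMU convention visible in pte_pfn() above that a PTE stores paddr >> 4): PFN_PTE_SHIFT = PAGE_SHIFT - 4 = 8, so the generic per-page step of pte_val(pte) + (1UL << 8) raises the encoded physical address by 0x100 << 4 = 0x1000, i.e. exactly one 4 KiB page.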
arch/sparc/mm/init_32.c
@@ -297,11 +297,20 @@ void sparc_flush_page_to_ram(struct page *page)
 {
 	unsigned long vaddr = (unsigned long)page_address(page);
 
-	if (vaddr)
-		__flush_page_to_ram(vaddr);
+	__flush_page_to_ram(vaddr);
 }
 EXPORT_SYMBOL(sparc_flush_page_to_ram);
 
+void sparc_flush_folio_to_ram(struct folio *folio)
+{
+	unsigned long vaddr = (unsigned long)folio_address(folio);
+	unsigned int i, nr = folio_nr_pages(folio);
+
+	for (i = 0; i < nr; i++)
+		__flush_page_to_ram(vaddr + i * PAGE_SIZE);
+}
+EXPORT_SYMBOL(sparc_flush_folio_to_ram);
+
 static const pgprot_t protection_map[16] = {
 	[VM_NONE]	= PAGE_NONE,
 	[VM_READ]	= PAGE_READONLY,
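Putting the pieces together, a hypothetical caller of the range API (map_folio() and its arguments are invented for illustration) touches each of the new sparc32 hooks once per folio rather than once per page:

/* Hypothetical illustration; map_folio() is an invented helper. */
static void map_folio(struct vm_fault *vmf, struct folio *folio,
		      unsigned long addr, pte_t *ptep, pgprot_t prot)
{
	unsigned int nr = folio_nr_pages(folio);

	flush_dcache_folio(folio);	/* one flush covers all subpages */
	set_ptes(vmf->vma->vm_mm, addr, ptep,
		 pfn_pte(folio_pfn(folio), prot), nr);
	flush_icache_pages(vmf->vma, &folio->page, nr);	/* no-op here */
	update_mmu_cache_range(vmf, vmf->vma, addr, ptep, nr);	/* no-op */
}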