Commit ca6c1af3 authored by Baoquan He's avatar Baoquan He Committed by Andrew Morton

xtensa: mm: convert to GENERIC_IOREMAP

By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap() and
iounmap() are all visible and available to arch.  Arch needs to provide
wrapper functions to override the generic versions if there's arch
specific handling in its ioremap_prot(), ioremap() or iounmap().  This
change will simplify implementation by removing duplicated code with
generic_ioremap_prot() and generic_iounmap(), and has the equivalent
functionality as before.

Here, add wrapper functions ioremap_prot(), ioremap() and iounmap() for
xtensa's special handling in ioremap() and iounmap().

Link: https://lkml.kernel.org/r/20230706154520.11257-14-bhe@redhat.com
Signed-off-by: default avatarBaoquan He <bhe@redhat.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarMike Rapoport (IBM) <rppt@kernel.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 0453c9a7
...@@ -28,6 +28,7 @@ config XTENSA ...@@ -28,6 +28,7 @@ config XTENSA
select GENERIC_LIB_UCMPDI2 select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK select GENERIC_SCHED_CLOCK
select GENERIC_IOREMAP if MMU
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <asm/vectors.h> #include <asm/vectors.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/pgtable.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -24,22 +25,24 @@ ...@@ -24,22 +25,24 @@
#define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR) #define PCI_IOBASE ((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
void xtensa_iounmap(volatile void __iomem *addr);
/* /*
* Return the virtual address for the specified bus memory. * I/O memory mapping functions.
*/ */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot);
#define ioremap_prot ioremap_prot
#define iounmap iounmap
static inline void __iomem *ioremap(unsigned long offset, unsigned long size) static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{ {
if (offset >= XCHAL_KIO_PADDR if (offset >= XCHAL_KIO_PADDR
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE) && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR); return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else else
return xtensa_ioremap_nocache(offset, size); return ioremap_prot(offset, size,
pgprot_val(pgprot_noncached(PAGE_KERNEL)));
} }
#define ioremap ioremap
static inline void __iomem *ioremap_cache(unsigned long offset, static inline void __iomem *ioremap_cache(unsigned long offset,
unsigned long size) unsigned long size)
...@@ -48,21 +51,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset, ...@@ -48,21 +51,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
&& offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE) && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR); return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else else
return xtensa_ioremap_cache(offset, size); return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
}
#define ioremap_cache ioremap_cache
static inline void iounmap(volatile void __iomem *addr)
{
unsigned long va = (unsigned long) addr;
if (!(va >= XCHAL_KIO_CACHED_VADDR &&
va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
!(va >= XCHAL_KIO_BYPASS_VADDR &&
va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
xtensa_iounmap(addr);
} }
#define ioremap_cache ioremap_cache
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
#include <asm-generic/io.h> #include <asm-generic/io.h>
......
...@@ -6,60 +6,30 @@ ...@@ -6,60 +6,30 @@
*/ */
#include <linux/io.h> #include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/io.h> #include <asm/io.h>
static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size, void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
pgprot_t prot) unsigned long prot)
{ {
unsigned long offset = paddr & ~PAGE_MASK; unsigned long pfn = __phys_to_pfn((phys_addr));
unsigned long pfn = __phys_to_pfn(paddr);
struct vm_struct *area;
unsigned long vaddr;
int err;
paddr &= PAGE_MASK;
WARN_ON(pfn_valid(pfn)); WARN_ON(pfn_valid(pfn));
size = PAGE_ALIGN(offset + size); return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
vaddr = (unsigned long)area->addr;
area->phys_addr = paddr;
err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
if (err) {
vunmap((void *)vaddr);
return NULL;
}
flush_cache_vmap(vaddr, vaddr + size);
return (void __iomem *)(offset + vaddr);
}
void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
{
return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
} }
EXPORT_SYMBOL(xtensa_ioremap_nocache); EXPORT_SYMBOL(ioremap_prot);
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size) void iounmap(volatile void __iomem *addr)
{ {
return xtensa_ioremap(addr, size, PAGE_KERNEL); unsigned long va = (unsigned long) addr;
}
EXPORT_SYMBOL(xtensa_ioremap_cache);
void xtensa_iounmap(volatile void __iomem *io_addr) if ((va >= XCHAL_KIO_CACHED_VADDR &&
{ va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); (va >= XCHAL_KIO_BYPASS_VADDR &&
va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
return;
vunmap(addr); generic_iounmap(addr);
} }
EXPORT_SYMBOL(xtensa_iounmap); EXPORT_SYMBOL(iounmap);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment