Commit 38d110ab authored by Baoquan He, committed by Andrew Morton

ia64: mm: convert to GENERIC_IOREMAP

By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap() and
iounmap() are all visible and available to the arch.  The arch only needs to
provide wrapper functions to override the generic versions if there is
arch-specific handling in its ioremap_prot(), ioremap() or iounmap().  This
change simplifies the implementation by removing code duplicated with
generic_ioremap_prot() and generic_iounmap(), while keeping functionality
equivalent to before.
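
For context, a minimal sketch of the override pattern is below.  It is not
part of this patch; the arch_needs_special_map()/arch_special_ioremap()/
arch_mapping_is_special() helpers are hypothetical stand-ins for whatever
arch-specific check an architecture needs (on ia64 it is the EFI
memory-attribute check visible in the diff).  The arch's asm/io.h also adds
"#define ioremap_prot ioremap_prot" and "#define iounmap iounmap" so the
default definitions are not built:

/* Sketch only: an arch wrapper keeps its special case and defers the
 * common path to the generic helpers provided by GENERIC_IOREMAP.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	if (arch_needs_special_map(phys_addr, size))		/* hypothetical */
		return arch_special_ioremap(phys_addr, size);	/* hypothetical */

	/* Common case: vmap-based mapping built by the generic code. */
	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	if (arch_mapping_is_special(addr))			/* hypothetical */
		return;
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);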

Here, add wrapper functions ioremap_prot() and iounmap() to carry out ia64's
special handling when mapping and unmapping I/O memory.
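
Callers are unaffected; a typical driver mapping still looks like the sketch
below (the physical address and register offset are made up for
illustration):

#include <linux/errno.h>
#include <linux/io.h>

/* Illustrative only: map a 4 KiB MMIO window, poke a register, unmap. */
static int example_touch_mmio(void)
{
	void __iomem *regs = ioremap(0xfed00000, 0x1000);	/* address is made up */

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x10);	/* hypothetical register offset */
	iounmap(regs);
	return 0;
}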

Link: https://lkml.kernel.org/r/20230706154520.11257-9-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Niklas Schnelle <schnelle@linux.ibm.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 06dfae39
@@ -47,6 +47,7 @@ config IA64
 	select GENERIC_IRQ_LEGACY
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select GENERIC_IOMAP
+	select GENERIC_IOREMAP
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_TASK_STRUCT_ON_STACK
 	select ARCH_TASK_STRUCT_ALLOCATOR
...
@@ -243,15 +243,12 @@ static inline void outsl(unsigned long port, const void *src,
 
 # ifdef __KERNEL__
 
-extern void __iomem * ioremap(unsigned long offset, unsigned long size);
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
+
 extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
-extern void iounmap (volatile void __iomem *addr);
-static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
-{
-	return ioremap(phys_addr, size);
-}
-#define ioremap ioremap
-#define ioremap_cache ioremap_cache
+
+#define ioremap_prot ioremap_prot
+#define ioremap_cache ioremap
 #define ioremap_uc ioremap_uc
 #define iounmap iounmap
 
...
@@ -29,13 +29,9 @@ early_ioremap (unsigned long phys_addr, unsigned long size)
 	return __ioremap_uc(phys_addr);
 }
 
-void __iomem *
-ioremap (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+			   unsigned long flags)
 {
-	void __iomem *addr;
-	struct vm_struct *area;
-	unsigned long offset;
-	pgprot_t prot;
 	u64 attr;
 	unsigned long gran_base, gran_size;
 	unsigned long page_base;
@@ -68,36 +64,12 @@ ioremap (unsigned long phys_addr, unsigned long size)
 	 */
 	page_base = phys_addr & PAGE_MASK;
 	size = PAGE_ALIGN(phys_addr + size) - page_base;
-	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
-		prot = PAGE_KERNEL;
-
-		/*
-		 * Mappings have to be page-aligned
-		 */
-		offset = phys_addr & ~PAGE_MASK;
-		phys_addr &= PAGE_MASK;
-
-		/*
-		 * Ok, go for it..
-		 */
-		area = get_vm_area(size, VM_IOREMAP);
-		if (!area)
-			return NULL;
-
-		area->phys_addr = phys_addr;
-		addr = (void __iomem *) area->addr;
-		if (ioremap_page_range((unsigned long) addr,
-		    (unsigned long) addr + size, phys_addr, prot)) {
-			vunmap((void __force *) addr);
-			return NULL;
-		}
-
-		return (void __iomem *) (offset + (char __iomem *)addr);
-	}
+	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB)
+		return generic_ioremap_prot(phys_addr, size, __pgprot(flags));
 
 	return __ioremap_uc(phys_addr);
 }
-EXPORT_SYMBOL(ioremap);
+EXPORT_SYMBOL(ioremap_prot);
 
 void __iomem *
 ioremap_uc(unsigned long phys_addr, unsigned long size)
@@ -114,8 +86,7 @@ early_iounmap (volatile void __iomem *addr, unsigned long size)
 {
 }
 
-void
-iounmap (volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 	if (REGION_NUMBER(addr) == RGN_GATE)
 		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
...