Commit b43b3fff authored by Baoquan He's avatar Baoquan He Committed by Andrew Morton

s390: mm: convert to GENERIC_IOREMAP

By taking GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
generic_iounmap(), and their generic wrapper ioremap_prot(), ioremap() and
iounmap() are all visible and available to arch.  Arch needs to provide
wrapper functions to override the generic versions if there's arch
specific handling in its ioremap_prot(), ioremap() or iounmap().  This
change will simplify implementation by removing duplicated code with
generic_ioremap_prot() and generic_iounmap(), and has the equivalent
functionality as before.

Here, add wrapper functions ioremap_prot() and iounmap() for s390's
special handling during ioremap() and iounmap().

And also replace including <asm-generic/io.h> with <asm/io.h> in
arch/s390/kernel/perf_cpum_sf.c, otherwise building error will be seen
because macro defined in <asm/io.h> can't be seen in perf_cpum_sf.c.

Link: https://lkml.kernel.org/r/20230706154520.11257-11-bhe@redhat.com
Signed-off-by: default avatarBaoquan He <bhe@redhat.com>
Reviewed-by: default avatarNiklas Schnelle <schnelle@linux.ibm.com>
Tested-by: default avatarNiklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarMike Rapoport (IBM) <rppt@kernel.org>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 9b994429
...@@ -143,6 +143,7 @@ config S390 ...@@ -143,6 +143,7 @@ config S390
select GENERIC_SMP_IDLE_THREAD select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL select GENERIC_TIME_VSYSCALL
select GENERIC_VDSO_TIME_NS select GENERIC_VDSO_TIME_NS
select GENERIC_IOREMAP if PCI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL
......
...@@ -22,11 +22,18 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); ...@@ -22,11 +22,18 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0 #define IO_SPACE_LIMIT 0
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); /*
void __iomem *ioremap(phys_addr_t addr, size_t size); * I/O memory mapping functions.
void __iomem *ioremap_wc(phys_addr_t addr, size_t size); */
void __iomem *ioremap_wt(phys_addr_t addr, size_t size); #define ioremap_prot ioremap_prot
void iounmap(volatile void __iomem *addr); #define iounmap iounmap
#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL)
#define ioremap_wc(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writecombine(PAGE_KERNEL)))
#define ioremap_wt(addr, size) \
ioremap_prot((addr), (size), pgprot_val(pgprot_writethrough(PAGE_KERNEL)))
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{ {
...@@ -51,10 +58,6 @@ static inline void ioport_unmap(void __iomem *p) ...@@ -51,10 +58,6 @@ static inline void ioport_unmap(void __iomem *p)
#define pci_iomap_wc pci_iomap_wc #define pci_iomap_wc pci_iomap_wc
#define pci_iomap_wc_range pci_iomap_wc_range #define pci_iomap_wc_range pci_iomap_wc_range
#define ioremap ioremap
#define ioremap_wt ioremap_wt
#define ioremap_wc ioremap_wc
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count) #define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count) #define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count) #define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
......
...@@ -244,62 +244,25 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) ...@@ -244,62 +244,25 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count); zpci_memcpy_toio(to, from, count);
} }
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot) void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{ {
unsigned long offset, vaddr; /*
struct vm_struct *area; * When PCI MIO instructions are unavailable the "physical" address
phys_addr_t last_addr; * encodes a hint for accessing the PCI memory space it represents.
* Just pass it unchanged such that ioread/iowrite can decode it.
last_addr = addr + size - 1; */
if (!size || last_addr < addr)
return NULL;
if (!static_branch_unlikely(&have_mio)) if (!static_branch_unlikely(&have_mio))
return (void __iomem *) addr; return (void __iomem *)phys_addr;
offset = addr & ~PAGE_MASK; return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
addr &= PAGE_MASK;
size = PAGE_ALIGN(size + offset);
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
vaddr = (unsigned long) area->addr;
if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
free_vm_area(area);
return NULL;
}
return (void __iomem *) ((unsigned long) area->addr + offset);
}
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
return __ioremap(addr, size, __pgprot(prot));
} }
EXPORT_SYMBOL(ioremap_prot); EXPORT_SYMBOL(ioremap_prot);
void __iomem *ioremap(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);
void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);
void iounmap(volatile void __iomem *addr) void iounmap(volatile void __iomem *addr)
{ {
if (static_branch_likely(&have_mio)) if (static_branch_likely(&have_mio))
vunmap((__force void *) ((unsigned long) addr & PAGE_MASK)); generic_iounmap(addr);
} }
EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(iounmap);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment