Commit 157e118b authored by Thomas Gleixner

x86/mm/highmem: Use generic kmap atomic implementation

Convert X86 to the generic kmap atomic implementation and make the
iomap_atomic() naming convention consistent while at it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201103095857.375127260@linutronix.de
parent 389755c2
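
With KMAP_LOCAL selected, x86 stops carrying its own kmap_atomic() machinery and relies on the generic implementation in mm/highmem.c. Condensed, the generic map side works roughly like the sketch below; kmap_local_idx_push() and kmap_get_pte() are illustrative stand-ins for the real internals, not exact signatures:

/*
 * Sketch of the generic CONFIG_KMAP_LOCAL map path.  Each CPU owns a
 * small stack of fixmap slots; pushing a slot yields the index used
 * to compute the temporary virtual address.
 */
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval = pfn_pte(pfn, prot);
	unsigned long vaddr;
	int idx;

	preempt_disable();
	/* Per-CPU slot stack, bounded by KM_MAX_IDX */
	idx = kmap_local_idx_push() + KM_MAX_IDX * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_get_pte(vaddr), pteval);
	arch_kmap_local_post_map(vaddr, pteval);	/* arch hook, see below */
	preempt_enable();
	return (void *)vaddr;
}

kmap_atomic() keeps its old semantics on top of this by additionally disabling pagefaults before mapping.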
arch/x86/Kconfig
@@ -14,10 +14,11 @@ config X86_32
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select CLKSRC_I8253
 	select CLONE_BACKWARDS
+	select GENERIC_VDSO_32
 	select HAVE_DEBUG_STACKOVERFLOW
+	select KMAP_LOCAL
 	select MODULES_USE_ELF_REL
 	select OLD_SIGACTION
-	select GENERIC_VDSO_32
 
 config X86_64
 	def_bool y

arch/x86/include/asm/fixmap.h
@@ -31,7 +31,7 @@
 #include <asm/pgtable_types.h>
 #ifdef CONFIG_X86_32
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
 #else
 #include <uapi/asm/vsyscall.h>
 #endif

@@ -94,7 +94,7 @@ enum fixed_addresses {
 #endif
 #ifdef CONFIG_X86_32
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
-	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
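
The per-CPU slot budget now comes from KM_MAX_IDX in <asm/kmap_size.h> rather than the removed kmap_types enum, but the sizing logic is unchanged: assuming the non-debug KM_MAX_IDX of 16 and NR_CPUS = 8, the FIX_KMAP_BEGIN..FIX_KMAP_END range reserves 16 * 8 = 128 fixmap PTEs.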

@@ -151,7 +151,6 @@ extern void reserve_top_address(unsigned long reserve);
 
 extern int fixmaps_set;
 
-extern pte_t *kmap_pte;
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);

arch/x86/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/fixmap.h>

@@ -58,11 +57,17 @@ extern unsigned long highstart_pfn, highend_pfn;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_pfn(unsigned long pfn);
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
-
 #define flush_cache_kmaps()	do { } while (0)
 
+#define arch_kmap_local_post_map(vaddr, pteval)		\
+	arch_flush_lazy_mmu_mode()
+
+#define arch_kmap_local_post_unmap(vaddr)		\
+	do {						\
+		flush_tlb_one_kernel((vaddr));		\
+		arch_flush_lazy_mmu_mode();		\
+	} while (0)
+
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 					unsigned long end_pfn);
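
These two macros are now the only x86-specific part of the atomic kmap path: the generic code calls them right after installing and right after clearing a temporary PTE. A condensed sketch of the generic unmap side (helper names again illustrative) shows where the post-unmap hook runs:

/*
 * Sketch of the generic CONFIG_KMAP_LOCAL unmap path, mirroring the
 * map-side sketch above.
 */
void kunmap_local_indexed(void *kvaddr)
{
	unsigned long addr = (unsigned long)kvaddr & PAGE_MASK;
	int idx;

	preempt_disable();
	idx = kmap_local_idx() + KM_MAX_IDX * smp_processor_id();
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
	pte_clear(&init_mm, addr, kmap_get_pte(addr));
	/* On x86: flush_tlb_one_kernel() + arch_flush_lazy_mmu_mode() */
	arch_kmap_local_post_unmap(addr);
	kmap_local_idx_pop();
	preempt_enable();
}

The TLB flush in arch_kmap_local_post_unmap() preserves what kpte_clear_flush() did in the removed x86 code: no stale translation may survive for a slot that is about to be reused.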

arch/x86/include/asm/iomap.h
@@ -9,19 +9,21 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
 
-void
-iounmap_atomic(void __iomem *kvaddr);
+static inline void iounmap_atomic(void __iomem *vaddr)
+{
+	kunmap_local_indexed((void __force *)vaddr);
+	pagefault_enable();
+	preempt_enable();
+}
 
-int
-iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
-void
-iomap_free(resource_size_t base, unsigned long size);
+void iomap_free(resource_size_t base, unsigned long size);
 
 #endif /* _ASM_X86_IOMAP_H */
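
A hypothetical end-to-end use of the renamed interface, with base, size and pfn as placeholders; iomap_create_wc() returns non-zero on failure:

/*
 * Reserve WC protections for a region, atomically map one page of it,
 * write a value and tear everything down again.  Illustrative only.
 */
static int touch_wc_page(resource_size_t base, unsigned long size,
			 unsigned long pfn)
{
	void __iomem *vaddr;
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return -EINVAL;

	vaddr = iomap_atomic_pfn_prot(pfn, prot); /* disables preemption and pagefaults */
	writel(0xcafebabe, vaddr);
	iounmap_atomic(vaddr);			  /* re-enables both */

	iomap_free(base, size);
	return 0;
}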

arch/x86/include/asm/kmap_types.h (deleted)
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_KMAP_TYPES_H
-#define _ASM_X86_KMAP_TYPES_H
-
-#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif /* _ASM_X86_KMAP_TYPES_H */

arch/x86/include/asm/paravirt_types.h
@@ -41,7 +41,6 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/desc_defs.h>
-#include <asm/kmap_types.h>
 #include <asm/pgtable_types.h>
 #include <asm/nospec-branch.h>

arch/x86/mm/highmem_32.c
@@ -4,65 +4,6 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/memblock.h>
 
-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void kunmap_atomic_high(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-		arch_flush_lazy_mmu_mode();
-	}
-#ifdef CONFIG_DEBUG_HIGHMEM
-	else {
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-	}
-#endif
-}
-EXPORT_SYMBOL(kunmap_atomic_high);
-
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
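
Nothing here needs an x86 replacement: kmap_atomic_high_prot() and kunmap_atomic_high() are subsumed by the generic CONFIG_KMAP_LOCAL implementation, and kmap_atomic_pfn() is likewise provided generically once the architecture opts in.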

arch/x86/mm/init_32.c
@@ -394,19 +394,6 @@ kernel_physical_mapping_init(unsigned long start,
 	return last_map_addr;
 }
 
-pte_t *kmap_pte;
-
-static void __init kmap_init(void)
-{
-	unsigned long kmap_vstart;
-
-	/*
-	 * Cache the first kmap pte:
-	 */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = virt_to_kpte(kmap_vstart);
-}
-
 #ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {

@@ -712,8 +699,6 @@ void __init paging_init(void)
 
 	__flush_tlb_all();
 
-	kmap_init();
-
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */

arch/x86/mm/iomap_32.c
@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-
-/*
- * Map 'pfn' using protections 'prot'
- */
-void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, translate non-WB request to UC- just in

@@ -81,36 +60,8 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	/* Filter out unsupported __PAGE_KERNEL* bits: */
 	pgprot_val(prot) &= __default_kernel_pte_mask;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
-}
-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
-
-void
-iounmap_atomic(void __iomem *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
-}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+	preempt_disable();
+	pagefault_disable();
+	return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
+}
+EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);

include/linux/highmem.h
@@ -217,7 +217,7 @@ static inline void __kunmap_atomic(void *addr)
 #endif /* CONFIG_HIGHMEM */
 
 #if !defined(CONFIG_KMAP_LOCAL)
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+#if defined(CONFIG_HIGHMEM)
 
 DECLARE_PER_CPU(int, __kmap_atomic_idx);

include/linux/io-mapping.h
@@ -69,7 +69,7 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
 
-	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
+	return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
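
Callers that go through the io_mapping wrappers, such as GPU drivers using io_mapping_map_atomic_wc(), see no change from the rename; only this wrapper references the arch-level function directly.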

mm/highmem.c
@@ -32,7 +32,7 @@
 #include <linux/vmalloc.h>
 
 #ifndef CONFIG_KMAP_LOCAL
-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+#ifdef CONFIG_HIGHMEM
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
 #endif