Commit 8fc48985 authored by Tejun Heo

vmalloc: add un/map_kernel_range_noflush()

Impact: two more public map/unmap functions

Implement map_kernel_range_noflush() and unmap_kernel_range_noflush().
These functions map and unmap, respectively, an address range in the
kernel VM area, but don't do any vcache or TLB flushing.  They will be
used by the new percpu allocator.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
parent f0aa6617
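
For context, a minimal usage sketch of the new pair, assuming a VM area
reserved with get_vm_area() and following the flushing rules spelled out in
the kerneldoc added below.  The example_* helpers, their parameters, and the
PAGE_KERNEL protection are hypothetical illustration, not part of this commit:

/*
 * Hypothetical caller sketch (not from this commit): map nr_pages pages
 * into a VM area reserved with get_vm_area(), then unmap them again.
 * All vcache/TLB flushing is the caller's responsibility.
 * Assumes <linux/vmalloc.h>, <linux/mm.h>, <asm/cacheflush.h> and
 * <asm/tlbflush.h>.
 */
static int example_map_pages(struct vm_struct *area, struct page **pages,
			     int nr_pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = nr_pages << PAGE_SHIFT;
	int ret;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;			/* -errno on failure */
	flush_cache_vmap(addr, addr + size);	/* as vmap_page_range() does */
	return 0;
}

static void example_unmap_pages(struct vm_struct *area, int nr_pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long size = nr_pages << PAGE_SHIFT;

	flush_cache_vunmap(addr, addr + size);		/* before, per the docs */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after, per the docs */
}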
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -91,6 +91,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+				    pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -153,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-				pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -170,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
 	return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+			   pgprot_t prot, struct page **pages)
+{
+	int ret;
+
+	ret = vmap_page_range_noflush(start, end, prot, pages);
+	flush_cache_vmap(start, end);
+	return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -1033,6 +1042,58 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
+{
+	return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+	vunmap_page_range(addr, addr + size);
+}
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
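
The final hunk is truncated above.  Per the kerneldoc just added, the
unmap_kernel_range() convenience wrapper should behave like the following
sketch; this is an inference from the documented contract, not the hunk's
actual body (vunmap_page_range() is the same file-local helper used by
unmap_kernel_range_noflush()):

/* Sketch of unmap_kernel_range() as described by its kerneldoc:
 * vcache flush before the unmap, TLB flush after.  Only valid inside
 * mm/vmalloc.c, where vunmap_page_range() is visible. */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);		/* vcache flush before */
	vunmap_page_range(addr, end);		/* the actual unmap */
	flush_tlb_kernel_range(addr, end);	/* TLB flush after */
}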