Commit 9a9d1d36 authored by Andrii Nakryiko

Merge branch 'mm-enforce-ioremap-address-space-and-introduce-sparse-vm_area'

Alexei Starovoitov says:

====================
mm: Enforce ioremap address space and introduce sparse vm_area

From: Alexei Starovoitov <ast@kernel.org>

v3 -> v4
- dropped the VM_XEN patch for now; it will be in a follow-up.
- fixed a constant, as pointed out by Mike

v2 -> v3
- added Christoph's Reviewed-by to patch 1
- capped commit log lines at 75 chars
- factored out the common checks in patch 3 into a helper
- made vm_area_unmap_pages() return void

There are various users of kernel virtual address space:
vmalloc, vmap, ioremap, xen.

- vmalloc dominates the usage. Such vm areas carry the VM_ALLOC flag
and are treated differently by KASAN.

- areas created by the vmap() function should be tagged with VM_MAP
(as the majority of users do).

- ioremap areas are tagged with VM_IOREMAP and, unlike vmalloc/vmap,
the vm area start is aligned to the size of the area.

- there is also a xen use case that is marked VM_IOREMAP, but unlike
all other VM_IOREMAP users it doesn't call ioremap_page_range().

To clean this up a bit, make ioremap_page_range() check the range
and the VM_IOREMAP flag.
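
As a purely illustrative sketch (not part of this series; the size,
variable names, phys_addr and prot are made up), a caller managing its
own VM_IOREMAP region would reserve the vm_area first and then map
exactly that range:

	struct vm_struct *area;
	int err;

	/* reserve a size-aligned VM_IOREMAP region up front */
	area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!area)
		return -ENOMEM;

	/* the request must cover the whole vm_area, otherwise -ERANGE */
	err = ioremap_page_range((unsigned long)area->addr,
				 (unsigned long)area->addr + get_vm_area_size(area),
				 phys_addr, prot);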

In addition, BPF would like to reserve regions of kernel virtual address
space and populate them lazily, similar to the xen use case.
For that reason, introduce the VM_SPARSE flag and the vm_area_[un]map_pages()
helpers to populate such sparse areas.
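
A rough usage sketch of the new helpers (hypothetical caller, not taken
from this series; error unwinding is omitted):

	struct vm_struct *area;
	struct page *pages[2];
	int err;

	/* reserve a large sparse region; nothing is mapped yet */
	area = get_vm_area(SZ_4G, VM_SPARSE);
	if (!area)
		return -ENOMEM;

	/* later: back a two-page window with real pages */
	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	if (!pages[0] || !pages[1])
		return -ENOMEM;
	err = vm_area_map_pages(area, (unsigned long)area->addr,
				(unsigned long)area->addr + 2 * PAGE_SIZE, pages);

	/* ... and tear the window down again when it is no longer needed */
	vm_area_unmap_pages(area, (unsigned long)area->addr,
			    (unsigned long)area->addr + 2 * PAGE_SIZE);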

In the end, /proc/vmallocinfo will show the
"vmalloc"
"vmap"
"ioremap"
"sparse"
categories for different kinds of address regions.

ioremap and sparse areas read back as zero when dumped through /proc/kcore.
====================

Link: https://lore.kernel.org/r/20240305030516.41519-1-alexei.starovoitov@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 8f50d5c4 e6f79822
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -35,6 +35,7 @@ struct iov_iter;	/* in uio.h */
 #else
 #define VM_DEFER_KMEMLEAK	0
 #endif
+#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */
 
 /* bits [20..32] reserved for arch specific ioremap internals */
 
@@ -232,6 +233,10 @@ static inline bool is_vm_area_hugepages(const void *addr)
 }
 
 #ifdef CONFIG_MMU
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+		      unsigned long end, struct page **pages);
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+			 unsigned long end);
 void vunmap_range(unsigned long addr, unsigned long end);
 static inline void set_vm_flush_reset_perms(void *addr)
 {

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -307,8 +307,21 @@ static int vmap_range_noflush(unsigned long addr, unsigned long end,
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot)
 {
+	struct vm_struct *area;
 	int err;
 
+	area = find_vm_area((void *)addr);
+	if (!area || !(area->flags & VM_IOREMAP)) {
+		WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr);
+		return -EINVAL;
+	}
+	if (addr != (unsigned long)area->addr ||
+	    (void *)end != area->addr + get_vm_area_size(area)) {
+		WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n",
+			  addr, end, (long)area->addr,
+			  (long)area->addr + get_vm_area_size(area));
+		return -ERANGE;
+	}
 	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
 				 ioremap_max_page_shift);
 	flush_cache_vmap(addr, end);
@@ -635,6 +648,58 @@ static int vmap_pages_range(unsigned long addr, unsigned long end,
 	return err;
 }
 
+static int check_sparse_vm_area(struct vm_struct *area, unsigned long start,
+				unsigned long end)
+{
+	might_sleep();
+	if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS))
+		return -EINVAL;
+	if (WARN_ON_ONCE(area->flags & VM_NO_GUARD))
+		return -EINVAL;
+	if (WARN_ON_ONCE(!(area->flags & VM_SPARSE)))
+		return -EINVAL;
+	if ((end - start) >> PAGE_SHIFT > totalram_pages())
+		return -E2BIG;
+	if (start < (unsigned long)area->addr ||
+	    (void *)end > area->addr + get_vm_area_size(area))
+		return -ERANGE;
+	return 0;
+}
+
+/**
+ * vm_area_map_pages - map pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ * @pages: pages to map (always PAGE_SIZE pages)
+ */
+int vm_area_map_pages(struct vm_struct *area, unsigned long start,
+		      unsigned long end, struct page **pages)
+{
+	int err;
+
+	err = check_sparse_vm_area(area, start, end);
+	if (err)
+		return err;
+
+	return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT);
+}
+
+/**
+ * vm_area_unmap_pages - unmap pages inside given sparse vm_area
+ * @area: vm_area
+ * @start: start address inside vm_area
+ * @end: end address inside vm_area
+ */
+void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
+			 unsigned long end)
+{
+	if (check_sparse_vm_area(area, start, end))
+		return;
+
+	vunmap_range(start, end);
+}
+
 int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -3809,9 +3874,9 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 
 		if (flags & VMAP_RAM)
 			copied = vmap_ram_vread_iter(iter, addr, n, flags);
-		else if (!(vm && (vm->flags & VM_IOREMAP)))
+		else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE))))
 			copied = aligned_vread_iter(iter, addr, n);
-		else /* IOREMAP area is treated as memory hole */
+		else /* IOREMAP | SPARSE area is treated as memory hole */
 			copied = zero_iter(iter, n);
 
 		addr += copied;
@@ -4402,6 +4467,9 @@ static int s_show(struct seq_file *m, void *p)
 	if (v->flags & VM_IOREMAP)
 		seq_puts(m, " ioremap");
 
+	if (v->flags & VM_SPARSE)
+		seq_puts(m, " sparse");
+
 	if (v->flags & VM_ALLOC)
 		seq_puts(m, " vmalloc");