Commit bdebd6a2 authored by Jann Horn, committed by Linus Torvalds

vmalloc: fix remap_vmalloc_range() bounds checks

remap_vmalloc_range() has had various issues with the bounds checks it
promises to perform ("This function checks that addr is a valid
vmalloc'ed area, and that it is big enough to cover the vma") over time,
e.g.:

 - not detecting pgoff<<PAGE_SHIFT overflow

 - not detecting (pgoff<<PAGE_SHIFT)+usize overflow

 - not checking whether addr and addr+(pgoff<<PAGE_SHIFT) are the same
   vmalloc allocation

 - comparing a potentially wildly out-of-bounds pointer with the end of
   the vmalloc region

In particular, since commit fc970227 ("bpf: Add mmap() support for
BPF_MAP_TYPE_ARRAY"), unprivileged users can cause kernel null pointer
dereferences by calling mmap() on a BPF map with a size that is bigger
than the distance from the start of the BPF map to the end of the
address space.

This could theoretically be used as a kernel ASLR bypass, by using
whether mmap() with a given offset oopses or returns an error code to
perform a binary search over the possible address range.

To allow remap_vmalloc_range_partial() to verify that addr and
addr+(pgoff<<PAGE_SHIFT) are in the same vmalloc region, pass the offset
to remap_vmalloc_range_partial() instead of adding it to the pointer in
remap_vmalloc_range().

In remap_vmalloc_range_partial(), fix the check against
get_vm_area_size() by using size comparisons instead of pointer
comparisons, and add checks for pgoff.

Fixes: 83342314 ("[PATCH] mm: introduce remap_vmalloc_range()")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: stable@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: Andrii Nakryiko <andriin@fb.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@chromium.org>
Link: http://lkml.kernel.org/r/20200415222312.236431-1-jannh@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0783ac95
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
@@ -266,7 +266,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 		if (start < offset + dump->size) {
 			tsz = min(offset + (u64)dump->size - start, (u64)size);
 			buf = dump->buf + start - offset;
-			if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
+			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
+							tsz)) {
 				ret = -EFAULT;
 				goto out_unlock;
 			}
@@ -624,7 +625,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 			tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 			kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 			if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-							kaddr, tsz))
+							kaddr, 0, tsz))
 				goto fail;
 			size -= tsz;
......
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
@@ -137,7 +137,7 @@ extern void vunmap(const void *addr);
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 				       unsigned long uaddr, void *kaddr,
-				       unsigned long size);
+				       unsigned long pgoff, unsigned long size);
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
......
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
@@ -34,6 +34,7 @@
 #include <linux/llist.h>
 #include <linux/bitops.h>
 #include <linux/rbtree_augmented.h>
+#include <linux/overflow.h>
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>
@@ -3054,6 +3055,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
  * @vma:		vma to cover
  * @uaddr:		target user address to start at
  * @kaddr:		virtual address of vmalloc kernel memory
+ * @pgoff:		offset from @kaddr to start at
  * @size:		size of map area
  *
  * Returns:	0 for success, -Exxx on failure
@@ -3066,9 +3068,15 @@ long vwrite(char *buf, char *addr, unsigned long count)
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
-				void *kaddr, unsigned long size)
+				void *kaddr, unsigned long pgoff,
+				unsigned long size)
 {
 	struct vm_struct *area;
+	unsigned long off;
+	unsigned long end_index;
+
+	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
+		return -EINVAL;
 
 	size = PAGE_ALIGN(size);
 
@@ -3082,8 +3090,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
 	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
 		return -EINVAL;
 
-	if (kaddr + size > area->addr + get_vm_area_size(area))
+	if (check_add_overflow(size, off, &end_index) ||
+	    end_index > get_vm_area_size(area))
 		return -EINVAL;
+	kaddr += off;
 
 	do {
 		struct page *page = vmalloc_to_page(kaddr);
@@ -3122,7 +3132,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 						unsigned long pgoff)
 {
 	return remap_vmalloc_range_partial(vma, vma->vm_start,
-					   addr + (pgoff << PAGE_SHIFT),
+					   addr, pgoff,
 					   vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
......
diff --git a/samples/vfio-mdev/mdpy.c b/samples/vfio-mdev/mdpy.c
@@ -418,7 +418,7 @@ static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	return remap_vmalloc_range_partial(vma, vma->vm_start,
-					   mdev_state->memblk,
+					   mdev_state->memblk, 0,
 					   vma->vm_end - vma->vm_start);
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment