Commit 253a496d authored by Daniel Axtens, committed by Linus Torvalds

kasan: don't assume percpu shadow allocations will succeed

syzkaller and the fault injector showed that I was wrong to assume that
we could ignore percpu shadow allocation failures.

Handle failures properly.  Merge all the allocated areas back into the
free list and release the shadow, then clean up and return NULL.  The
shadow is released unconditionally, which relies upon the fact that the
release function is able to tolerate pages not being present.

Also clean up shadows in the recovery path; currently they are not
released, which leaks a bit of memory.
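
The contract this depends on is worth making concrete. Below is a minimal
userspace sketch of the unwinding pattern, not kernel code:
populate_shadow(), release_shadow(), NR_AREAS, and the forced failure at
area 2 are hypothetical stand-ins for kasan_populate_vmalloc(),
kasan_release_vmalloc(), and the fault injector. The point is that the
error path walks every area and the release helper tolerates shadows
that were never populated.

	/* Toy model of the unwinding pattern; not kernel code. */
	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_AREAS 4	/* hypothetical area count */

	static bool shadow_present[NR_AREAS];

	/* Stand-in for kasan_populate_vmalloc(): can fail under pressure. */
	static int populate_shadow(int area)
	{
		if (area == 2)	/* simulate a fault-injected failure */
			return -ENOMEM;
		shadow_present[area] = true;
		return 0;
	}

	/* Stand-in for kasan_release_vmalloc(): tolerates absent shadows. */
	static void release_shadow(int area)
	{
		if (!shadow_present[area]) {
			printf("area %d: shadow not present, tolerated\n", area);
			return;
		}
		shadow_present[area] = false;
		printf("area %d: shadow released\n", area);
	}

	int main(void)
	{
		int area;

		for (area = 0; area < NR_AREAS; area++) {
			if (populate_shadow(area))
				goto err_free_shadow;
		}
		printf("all shadows populated\n");
		return 0;

	err_free_shadow:
		/*
		 * Release all shadows unconditionally, including areas whose
		 * population never ran or failed; release_shadow() copes.
		 */
		for (area = 0; area < NR_AREAS; area++)
			release_shadow(area);
		return 1;
	}

Running it, areas 0 and 1 release their shadows while areas 2 and 3 are
tolerated as absent, which is the same tolerance the unconditional
release described above relies on.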

Link: http://lkml.kernel.org/r/20191205140407.1874-3-dja@axtens.net
Fixes: 3c5c3cfb ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Daniel Axtens <dja@axtens.net>
Reported-by: syzbot+82e323920b78d54aaed5@syzkaller.appspotmail.com
Reported-by: syzbot+59b7daa4315e07a994f1@syzkaller.appspotmail.com
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Qian Cai <cai@lca.pw>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e218f1ca
mm/vmalloc.c:

@@ -3288,7 +3288,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	struct vmap_area **vas, *va;
 	struct vm_struct **vms;
 	int area, area2, last_area, term_area;
-	unsigned long base, start, size, end, last_end;
+	unsigned long base, start, size, end, last_end, orig_start, orig_end;
 	bool purged = false;
 	enum fit_type type;
@@ -3418,6 +3418,15 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
 	spin_unlock(&free_vmap_area_lock);
 
+	/* populate the kasan shadow space */
+	for (area = 0; area < nr_vms; area++) {
+		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
+			goto err_free_shadow;
+
+		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
+				       sizes[area]);
+	}
+
 	/* insert all vm's */
 	spin_lock(&vmap_area_lock);
 	for (area = 0; area < nr_vms; area++) {
@@ -3428,13 +3437,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	}
 	spin_unlock(&vmap_area_lock);
 
-	/* populate the shadow space outside of the lock */
-	for (area = 0; area < nr_vms; area++) {
-		/* assume success here */
-		kasan_populate_vmalloc(vas[area]->va_start, sizes[area]);
-		kasan_unpoison_vmalloc((void *)vms[area]->addr, sizes[area]);
-	}
-
 	kfree(vas);
 	return vms;
@@ -3446,8 +3448,12 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	 * and when pcpu_get_vm_areas() is success.
 	 */
 	while (area--) {
-		merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
-				       &free_vmap_area_list);
+		orig_start = vas[area]->va_start;
+		orig_end = vas[area]->va_end;
+		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+					    &free_vmap_area_list);
+		kasan_release_vmalloc(orig_start, orig_end,
+				      va->va_start, va->va_end);
 		vas[area] = NULL;
 	}
@@ -3482,6 +3488,28 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	kfree(vas);
 	kfree(vms);
 	return NULL;
+
+err_free_shadow:
+	spin_lock(&free_vmap_area_lock);
+	/*
+	 * We release all the vmalloc shadows, even the ones for regions that
+	 * hadn't been successfully added. This relies on kasan_release_vmalloc
+	 * being able to tolerate this case.
+	 */
+	for (area = 0; area < nr_vms; area++) {
+		orig_start = vas[area]->va_start;
+		orig_end = vas[area]->va_end;
+		va = merge_or_add_vmap_area(vas[area], &free_vmap_area_root,
+					    &free_vmap_area_list);
+		kasan_release_vmalloc(orig_start, orig_end,
+				      va->va_start, va->va_end);
+		vas[area] = NULL;
+		kfree(vms[area]);
+	}
+	spin_unlock(&free_vmap_area_lock);
+	kfree(vas);
+	kfree(vms);
+	return NULL;
 }
 
 /**
...