Commit 92393615 authored by Andrey Ryabinin, committed by Linus Torvalds

mm/mempool.c: kasan: poison mempool elements

Mempools keep allocated objects in reserve for situations when an ordinary allocation cannot be satisfied.  These objects shouldn't be accessed before they leave the pool.

This patch poisons elements when they enter the pool and unpoisons them when they leave it.  This lets KASan detect use-after-free of mempool elements.
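For illustration only (not part of this patch; the pool, its sizes, and the access pattern are hypothetical), this is the kind of bug the poisoning makes detectable: touching an element after handing it back to a kmalloc-backed mempool.

	/* Hypothetical sketch -- "pool" and the sizes below are made up. */
	mempool_t *pool = mempool_create_kmalloc_pool(4, 128);
	char *obj = mempool_alloc(pool, GFP_KERNEL);

	mempool_free(obj, pool);	/* element may go back into the reserve, now poisoned */
	obj[0] = 'x';			/* use-after-free that KASan can now report */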
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Tested-by: David Rientjes <rientjes@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Chernenkov <drcheren@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bda6d330
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size);
 void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
 void kasan_krealloc(const void *object, size_t new_size);
 
@@ -71,6 +72,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
 static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
 				size_t size) {}
 static inline void kasan_krealloc(const void *object, size_t new_size) {}
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -389,6 +389,19 @@ void kasan_krealloc(const void *object, size_t size)
 		kasan_kmalloc(page->slab_cache, object, size);
 }
 
+void kasan_kfree(void *ptr)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+				KASAN_FREE_PAGE);
+	else
+		kasan_slab_free(page->slab_cache, ptr);
+}
+
 void kasan_kfree_large(const void *ptr)
 {
 	struct page *page = virt_to_page(ptr);
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
+#include <linux/kasan.h>
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
@@ -101,10 +102,31 @@ static inline void poison_element(mempool_t *pool, void *element)
 }
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_free(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+	if (pool->alloc == mempool_alloc_slab)
+		kasan_slab_alloc(pool->pool_data, element);
+	if (pool->alloc == mempool_kmalloc)
+		kasan_krealloc(element, (size_t)pool->pool_data);
+	if (pool->alloc == mempool_alloc_pages)
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
+
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
 	poison_element(pool, element);
+	kasan_poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
@@ -114,6 +136,7 @@ static void *remove_element(mempool_t *pool)
 
 	BUG_ON(pool->curr_nr < 0);
 	check_element(pool, element);
+	kasan_unpoison_element(pool, element);
 	return element;
 }
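As a reference sketch (not part of the patch), pool->pool_data holds whatever the pool's creator passed in, which is why the new helpers cast it differently per allocator type: a kmem_cache pointer for slab pools, the object size for kmalloc pools, and the page order for page pools.

	/* Hypothetical pool setups; "objcache" is an illustrative kmem_cache pointer. */
	mempool_t *slab_pool = mempool_create_slab_pool(4, objcache);	/* pool_data = struct kmem_cache * */
	mempool_t *kmalloc_pool = mempool_create_kmalloc_pool(4, 256);	/* pool_data = object size in bytes */
	mempool_t *page_pool = mempool_create_page_pool(4, 0);		/* pool_data = page allocation order */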