Commit 44405099 authored by Long Li's avatar Long Li Committed by Linus Torvalds

mm, slab: check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order

kmalloc cannot allocate memory from HIGHMEM.  Allocating large amounts of
memory currently bypasses the check and will simply leak the memory when
page_address() returns NULL.  To fix this, factor the GFP_SLAB_BUG_MASK
check out of slab & slub, and call it from kmalloc_order() as well.  In
order to make the code clear, the warning message is put in one place.
Signed-off-by: Long Li <lonuxli.64@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200704035027.GA62481@lilong
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dabc3e29
...@@ -2589,13 +2589,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, ...@@ -2589,13 +2589,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
* Be lazy and only check for valid flags here, keeping it out of the * Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc(). * critical path in kmem_cache_alloc().
*/ */
if (unlikely(flags & GFP_SLAB_BUG_MASK)) { if (unlikely(flags & GFP_SLAB_BUG_MASK))
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; flags = kmalloc_fix_flags(flags);
flags &= ~GFP_SLAB_BUG_MASK;
pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
invalid_mask, &invalid_mask, flags, &flags);
dump_stack();
}
WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
......
...@@ -152,6 +152,7 @@ void create_kmalloc_caches(slab_flags_t); ...@@ -152,6 +152,7 @@ void create_kmalloc_caches(slab_flags_t);
struct kmem_cache *kmalloc_slab(size_t, gfp_t); struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif #endif
gfp_t kmalloc_fix_flags(gfp_t flags);
/* Functions provided by the slab allocators */ /* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/kmem.h> #include <trace/events/kmem.h>
#include "internal.h"
#include "slab.h" #include "slab.h"
enum slab_state slab_state; enum slab_state slab_state;
...@@ -1332,6 +1334,18 @@ void __init create_kmalloc_caches(slab_flags_t flags) ...@@ -1332,6 +1334,18 @@ void __init create_kmalloc_caches(slab_flags_t flags)
} }
#endif /* !CONFIG_SLOB */ #endif /* !CONFIG_SLOB */
/*
 * Strip flags that are invalid for slab allocations (GFP_SLAB_BUG_MASK,
 * e.g. __GFP_HIGHMEM which kmalloc can never honour), warn loudly so the
 * offending caller can be fixed, and return the sanitized flags.
 */
gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t bad_bits = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			bad_bits, &bad_bits, flags, &flags);
	dump_stack();

	return flags;
}
/* /*
* To avoid unnecessary overhead, we pass through large allocation requests * To avoid unnecessary overhead, we pass through large allocation requests
* directly to the page allocator. We use __GFP_COMP, because we will need to * directly to the page allocator. We use __GFP_COMP, because we will need to
...@@ -1342,6 +1356,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) ...@@ -1342,6 +1356,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
void *ret = NULL; void *ret = NULL;
struct page *page; struct page *page;
if (unlikely(flags & GFP_SLAB_BUG_MASK))
flags = kmalloc_fix_flags(flags);
flags |= __GFP_COMP; flags |= __GFP_COMP;
page = alloc_pages(flags, order); page = alloc_pages(flags, order);
if (likely(page)) { if (likely(page)) {
......
...@@ -1745,13 +1745,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) ...@@ -1745,13 +1745,8 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{ {
if (unlikely(flags & GFP_SLAB_BUG_MASK)) { if (unlikely(flags & GFP_SLAB_BUG_MASK))
gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; flags = kmalloc_fix_flags(flags);
flags &= ~GFP_SLAB_BUG_MASK;
pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
invalid_mask, &invalid_mask, flags, &flags);
dump_stack();
}
return allocate_slab(s, return allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment