Commit b9ce5ef4 authored by Glauber Costa, committed by Linus Torvalds

sl[au]b: always get the cache from its page in kmem_cache_free()

struct page already has this information.  If we start chaining caches,
this information will always be more trustworthy than whatever is passed
into the function.
Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0e9d92f2
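
The idea, in a hedged sketch (not part of the patch): every slab object's backing struct page records the kmem_cache it was allocated from in page->slab_cache, so the owning cache can always be recovered from the object pointer itself rather than trusted from the caller. The helper name below is hypothetical:

/*
 * Illustrative sketch only. virt_to_head_page() maps an object address
 * to its (head) struct page; page->slab_cache names the owning cache.
 */
static inline struct kmem_cache *owning_cache(void *x)
{
	struct page *page = virt_to_head_page(x);

	return page->slab_cache;
}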
include/linux/memcontrol.h
@@ -554,6 +554,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+static inline bool memcg_kmem_enabled(void)
+{
+	return false;
+}
+
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 {
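
The constant-false stub added above is what keeps cache_from_obj() (introduced in mm/slab.h below) free on the fast path. A hedged sketch of what its guard reduces to when CONFIG_MEMCG_KMEM is not set; the function name is hypothetical:

/*
 * Sketch, assuming !CONFIG_MEMCG_KMEM: memcg_kmem_enabled() is the
 * constant-false stub, so the compiler folds the guard down to one
 * flag test and non-debug caches never touch the page at all.
 */
static inline struct kmem_cache *cache_from_obj_folded(struct kmem_cache *s,
						       void *x)
{
	if (!unlikely(s->flags & SLAB_DEBUG_FREE))	/* memcg test folded away */
		return s;
	return virt_to_head_page(x)->slab_cache;
}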
mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include <linux/slab.h>
-#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -128,6 +127,8 @@
 #include "internal.h"
 
+#include "slab.h"
+
 /*
  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
@@ -3883,6 +3884,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+	cachep = cache_from_obj(cachep, objp);
+	if (!cachep)
+		return;
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
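
For SLAB this means the free path now validates the cache before taking any locks. A hedged example of the misuse it catches (the cache names and wrapper are hypothetical, and the check only runs with SLAB_DEBUG_FREE or kmemcg active):

/*
 * Hypothetical misuse, for illustration only. cache_from_obj() sees
 * that the object's page belongs to cache_a, prints a "Wrong slab
 * cache" error, WARNs once, and the free proceeds instead of being
 * silently misdirected.
 */
static void example_cross_cache_free(struct kmem_cache *cache_a,
				     struct kmem_cache *cache_b)
{
	void *obj = kmem_cache_alloc(cache_a, GFP_KERNEL);

	kmem_cache_free(cache_b, obj);	/* wrong cache: now detected */
}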
mm/slab.h
@@ -116,6 +116,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 	return (is_root_cache(cachep) && !memcg) ||
 		(cachep->memcg_params->memcg == memcg);
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+					struct kmem_cache *p)
+{
+	return (p == s) ||
+		(s->memcg_params && (p == s->memcg_params->root_cache));
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -127,5 +134,37 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 {
 	return true;
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+				      struct kmem_cache *p)
+{
+	return true;
+}
 #endif
+
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+	struct kmem_cache *cachep;
+	struct page *page;
+
+	/*
+	 * When kmemcg is not being used, both assignments should return the
+	 * same value, but we don't want to pay the assignment price in that
+	 * case. If it is not compiled in, the compiler should be smart enough
+	 * to not do even the assignment. In that case, slab_equal_or_root
+	 * will also be a constant.
+	 */
+	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+		return s;
+
+	page = virt_to_head_page(x);
+	cachep = page->slab_cache;
+	if (slab_equal_or_root(cachep, s))
+		return cachep;
+
+	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+		__FUNCTION__, cachep->name, s->name);
+	WARN_ON_ONCE(1);
+	return s;
+}
+#endif
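
cache_from_obj() is also what makes the "chaining caches" case from the commit message work: a caller can free through the root cache while the object actually sits in a per-memcg child cache. A hedged sketch (names hypothetical; assumes the child's memcg_params->root_cache points at the root):

/*
 * Sketch of the redirection. page->slab_cache is the per-memcg child;
 * slab_equal_or_root(child, root) accepts the pair, so the child is
 * returned and the free is charged to the memcg that owns the object.
 */
static struct kmem_cache *resolve_for_free(struct kmem_cache *root, void *obj)
{
	return cache_from_obj(root, obj);	/* the child cache, not root */
}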
mm/slob.c
@@ -58,7 +58,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include "slab.h"
 
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
@@ -73,6 +72,7 @@
 #include <linux/atomic.h>
 
+#include "slab.h"
 
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
mm/slub.c
@@ -2611,19 +2611,10 @@ static __always_inline void slab_free(struct kmem_cache *s,
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page *page;
-
-	page = virt_to_head_page(x);
-
-	if (kmem_cache_debug(s) && page->slab_cache != s) {
-		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-			" is from %s\n", page->slab_cache->name, s->name);
-		WARN_ON_ONCE(1);
+	s = cache_from_obj(s, x);
+	if (!s)
 		return;
-	}
-
-	slab_free(s, page, x, _RET_IP_);
+	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);