Commit 80552f0f authored by Qian Cai, committed by Borislav Petkov

mm/slab: Remove store_stackinfo()

store_stackinfo() does not seem to be used in actual SLAB debugging.
It could potentially be called from check_poison_obj() to provide more
information, but that seems like overkill given the declining popularity
of SLAB, so just remove it instead.
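
For the record, wiring it into check_poison_obj() would have amounted to
decoding the record store_stackinfo() writes into the object. A minimal,
hypothetical sketch of such a dump helper (dump_stored_stackinfo() is
illustrative only, not existing kernel code):

	static void dump_stored_stackinfo(unsigned long *addr, int size)
	{
		int i, words = size / sizeof(unsigned long);

		/* 0x12345678 marks the start of a store_stackinfo() record */
		if (words < 5 || addr[0] != 0x12345678)
			return;

		pr_err("caller %pS on CPU %lu\n", (void *)addr[1], addr[2]);

		/* words up to the 0x87654321 end marker are stack text addresses */
		for (i = 3; i < words && addr[i] != 0x87654321; i++)
			pr_err("  %pS\n", (void *)addr[i]);
	}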
Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: rientjes@google.com
Cc: sean.j.christopherson@intel.com
Link: https://lkml.kernel.org/r/20190416142258.18694-1-cai@lca.pw
parent dc4060a5
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1467,53 +1467,17 @@ static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
-			    unsigned long caller)
-{
-	int size = cachep->object_size;
-
-	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
-
-	if (size < 5 * sizeof(unsigned long))
-		return;
-
-	*addr++ = 0x12345678;
-	*addr++ = caller;
-	*addr++ = smp_processor_id();
-	size -= 3 * sizeof(unsigned long);
-	{
-		unsigned long *sptr = &caller;
-		unsigned long svalue;
-
-		while (!kstack_end(sptr)) {
-			svalue = *sptr++;
-			if (kernel_text_address(svalue)) {
-				*addr++ = svalue;
-				size -= sizeof(unsigned long);
-				if (size <= sizeof(unsigned long))
-					break;
-			}
-		}
-	}
-	*addr++ = 0x87654321;
-}
-
-static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
-			    int map, unsigned long caller)
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map)
 {
 	if (!is_debug_pagealloc_cache(cachep))
 		return;
 
-	if (caller)
-		store_stackinfo(cachep, objp, caller);
-
 	kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
 }
 
 #else
 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
-				   int map, unsigned long caller) {}
+				   int map) {}
 
 #endif
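
For context, the removed helper stored a fixed-format record inside the
object's memory; its layout, reconstructed from the deleted lines above:

	/*
	 * Written by store_stackinfo() at obj_offset(cachep), only when
	 * object_size >= 5 * sizeof(unsigned long):
	 *
	 *   word 0:       0x12345678          start marker
	 *   word 1:       caller              passed-in return address
	 *   word 2:       smp_processor_id()  CPU touching the object
	 *   words 3..n-1: kernel text addresses scraped off the stack
	 *   word n:       0x87654321          end marker
	 */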
@@ -1661,7 +1625,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 
 	if (cachep->flags & SLAB_POISON) {
 		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1, 0);
+		slab_kernel_map(cachep, objp, 1);
 	}
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
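
Background on the surviving map argument (general DEBUG_PAGEALLOC
behavior, not something this patch changes):

	/*
	 * slab_kernel_map(cachep, objp, 1) re-maps the object's pages so
	 * the poison bytes can be examined; slab_kernel_map(cachep, objp, 0)
	 * unmaps them again, so a stray access to a poisoned object faults
	 * immediately under CONFIG_DEBUG_PAGEALLOC.
	 */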
@@ -2434,7 +2398,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON) {
 			poison_obj(cachep, objp, POISON_FREE);
-			slab_kernel_map(cachep, objp, 0, 0);
+			slab_kernel_map(cachep, objp, 0);
 		}
 	}
 #endif
@@ -2813,7 +2777,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 
 	if (cachep->flags & SLAB_POISON) {
 		poison_obj(cachep, objp, POISON_FREE);
-		slab_kernel_map(cachep, objp, 0, caller);
+		slab_kernel_map(cachep, objp, 0);
 	}
 	return objp;
 }
@@ -3077,7 +3041,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 		return objp;
 	if (cachep->flags & SLAB_POISON) {
 		check_poison_obj(cachep, objp);
-		slab_kernel_map(cachep, objp, 1, 0);
+		slab_kernel_map(cachep, objp, 1);
 		poison_obj(cachep, objp, POISON_INUSE);
 	}
 	if (cachep->flags & SLAB_STORE_USER)