Commit 64dd6849 authored by Faiyaz Mohammed, committed by Linus Torvalds

mm: slub: move sysfs slab alloc/free interfaces to debugfs

The alloc_calls and free_calls implementation in sysfs has two issues: one is
the PAGE_SIZE limitation of sysfs, and the other is that it does not adhere to
the "one value per file" rule.

To overcome these issues, move the alloc_calls and free_calls implementation
to debugfs.

The debugfs entries for a cache are created only if the SLAB_STORE_USER flag
is set.

Rename alloc_calls/free_calls to alloc_traces/free_traces, to be in line with
what they do.
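
As an illustration of the new interface (assuming debugfs is mounted at
/sys/kernel/debug, and taking kmalloc-64 as an arbitrary example of a cache
running with SLAB_STORE_USER, e.g. after booting with slub_debug=U):

  # previously: /sys/kernel/slab/kmalloc-64/alloc_calls
  cat /sys/kernel/debug/slab/kmalloc-64/alloc_traces
  cat /sys/kernel/debug/slab/kmalloc-64/free_traces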

[faiyazm@codeaurora.org: fix the leak of alloc/free traces debugfs interface]
  Link: https://lkml.kernel.org/r/1624248060-30286-1-git-send-email-faiyazm@codeaurora.org

Link: https://lkml.kernel.org/r/1623438200-19361-1-git-send-email-faiyazm@codeaurora.org
Signed-off-by: Faiyaz Mohammed <faiyazm@codeaurora.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79270291
@@ -631,6 +631,12 @@ static inline bool slab_want_init_on_free(struct kmem_cache *c)
 	return false;
 }
 
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+void debugfs_slab_release(struct kmem_cache *);
+#else
+static inline void debugfs_slab_release(struct kmem_cache *s) { }
+#endif
+
 #ifdef CONFIG_PRINTK
 #define KS_ADDRS_COUNT 16
 struct kmem_obj_info {
...
@@ -448,6 +448,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	rcu_barrier();
 
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
+		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_release(s);
@@ -475,6 +476,7 @@ static int shutdown_cache(struct kmem_cache *s)
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 		kfence_shutdown_cache(s);
+		debugfs_slab_release(s);
 #ifdef SLAB_SUPPORTS_SYSFS
 		sysfs_slab_unlink(s);
 		sysfs_slab_release(s);
...
@@ -38,6 +38,7 @@
 #include <linux/random.h>
 #include <kunit/test.h>
+#include <linux/debugfs.h>
 
 #include <trace/events/kmem.h>
 
 #include "internal.h"
@@ -238,6 +239,12 @@ static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
 #endif
 
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
+static void debugfs_slab_add(struct kmem_cache *);
+#else
+static inline void debugfs_slab_add(struct kmem_cache *s) { }
+#endif
+
 static inline void stat(const struct kmem_cache *s, enum stat_item si)
 {
 #ifdef CONFIG_SLUB_STATS
@@ -4593,6 +4600,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 	if (err)
 		__kmem_cache_release(s);
 
+	if (s->flags & SLAB_STORE_USER)
+		debugfs_slab_add(s);
+
 	return err;
 }
@@ -4739,6 +4749,7 @@ long validate_slab_cache(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(validate_slab_cache);
 
+#ifdef CONFIG_DEBUG_FS
 /*
  * Generate lists of code addresses where slabcache objects are allocated
  * and freed.
@@ -4762,6 +4773,8 @@ struct loc_track {
 	struct location *loc;
 };
 
+static struct dentry *slab_debugfs_root;
+
 static void free_loc_track(struct loc_track *t)
 {
 	if (t->max)
@@ -4878,82 +4891,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
 		add_location(t, s, get_track(s, p, alloc));
 	put_map(map);
 }
-
-static int list_locations(struct kmem_cache *s, char *buf,
-			  enum track_item alloc)
-{
-	int len = 0;
-	unsigned long i;
-	struct loc_track t = { 0, 0, NULL };
-	int node;
-	struct kmem_cache_node *n;
-
-	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-			     GFP_KERNEL)) {
-		return sysfs_emit(buf, "Out of memory\n");
-	}
-	/* Push back cpu slabs */
-	flush_all(s);
-
-	for_each_kmem_cache_node(s, node, n) {
-		unsigned long flags;
-		struct page *page;
-
-		if (!atomic_long_read(&n->nr_slabs))
-			continue;
-
-		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->partial, slab_list)
-			process_slab(&t, s, page, alloc);
-		list_for_each_entry(page, &n->full, slab_list)
-			process_slab(&t, s, page, alloc);
-		spin_unlock_irqrestore(&n->list_lock, flags);
-	}
-
-	for (i = 0; i < t.count; i++) {
-		struct location *l = &t.loc[i];
-
-		len += sysfs_emit_at(buf, len, "%7ld ", l->count);
-
-		if (l->addr)
-			len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr);
-		else
-			len += sysfs_emit_at(buf, len, "<not-available>");
-
-		if (l->sum_time != l->min_time)
-			len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld",
-					     l->min_time,
-					     (long)div_u64(l->sum_time,
-							   l->count),
-					     l->max_time);
-		else
-			len += sysfs_emit_at(buf, len, " age=%ld", l->min_time);
-
-		if (l->min_pid != l->max_pid)
-			len += sysfs_emit_at(buf, len, " pid=%ld-%ld",
-					     l->min_pid, l->max_pid);
-		else
-			len += sysfs_emit_at(buf, len, " pid=%ld",
-					     l->min_pid);
-
-		if (num_online_cpus() > 1 &&
-		    !cpumask_empty(to_cpumask(l->cpus)))
-			len += sysfs_emit_at(buf, len, " cpus=%*pbl",
-					     cpumask_pr_args(to_cpumask(l->cpus)));
-
-		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
-			len += sysfs_emit_at(buf, len, " nodes=%*pbl",
-					     nodemask_pr_args(&l->nodes));
-
-		len += sysfs_emit_at(buf, len, "\n");
-	}
-
-	free_loc_track(&t);
-	if (!t.count)
-		len += sysfs_emit_at(buf, len, "No data\n");
-
-	return len;
-}
+#endif  /* CONFIG_DEBUG_FS   */
 #endif	/* CONFIG_SLUB_DEBUG */
 
 #ifdef CONFIG_SYSFS
@@ -5343,21 +5281,6 @@ static ssize_t validate_store(struct kmem_cache *s,
 }
 SLAB_ATTR(validate);
 
-static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
-{
-	if (!(s->flags & SLAB_STORE_USER))
-		return -ENOSYS;
-	return list_locations(s, buf, TRACK_ALLOC);
-}
-SLAB_ATTR_RO(alloc_calls);
-
-static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
-{
-	if (!(s->flags & SLAB_STORE_USER))
-		return -ENOSYS;
-	return list_locations(s, buf, TRACK_FREE);
-}
-SLAB_ATTR_RO(free_calls);
 #endif /* CONFIG_SLUB_DEBUG */
 
 #ifdef CONFIG_FAILSLAB
@@ -5521,8 +5444,6 @@ static struct attribute *slab_attrs[] = {
 	&poison_attr.attr,
 	&store_user_attr.attr,
 	&validate_attr.attr,
-	&alloc_calls_attr.attr,
-	&free_calls_attr.attr,
 #endif
 #ifdef CONFIG_ZONE_DMA
 	&cache_dma_attr.attr,
@@ -5810,6 +5731,173 @@ static int __init slab_sysfs_init(void)
 __initcall(slab_sysfs_init);
 #endif /* CONFIG_SYSFS */
 
+#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
+static int slab_debugfs_show(struct seq_file *seq, void *v)
+{
+	struct location *l;
+	unsigned int idx = *(unsigned int *)v;
+	struct loc_track *t = seq->private;
+
+	if (idx < t->count) {
+		l = &t->loc[idx];
+
+		seq_printf(seq, "%7ld ", l->count);
+
+		if (l->addr)
+			seq_printf(seq, "%pS", (void *)l->addr);
+		else
+			seq_puts(seq, "<not-available>");
+
+		if (l->sum_time != l->min_time) {
+			seq_printf(seq, " age=%ld/%llu/%ld",
+				l->min_time, div_u64(l->sum_time, l->count),
+				l->max_time);
+		} else
+			seq_printf(seq, " age=%ld", l->min_time);
+
+		if (l->min_pid != l->max_pid)
+			seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
+		else
+			seq_printf(seq, " pid=%ld",
+				l->min_pid);
+
+		if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
+			seq_printf(seq, " cpus=%*pbl",
+				cpumask_pr_args(to_cpumask(l->cpus)));
+
+		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
+			seq_printf(seq, " nodes=%*pbl",
+				nodemask_pr_args(&l->nodes));
+
+		seq_puts(seq, "\n");
+	}
+
+	if (!idx && !t->count)
+		seq_puts(seq, "No data\n");
+
+	return 0;
+}
+
+static void slab_debugfs_stop(struct seq_file *seq, void *v)
+{
+}
+
+static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
+{
+	struct loc_track *t = seq->private;
+
+	v = ppos;
+	++*ppos;
+	if (*ppos <= t->count)
+		return v;
+
+	return NULL;
+}
+
+static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
+{
+	return ppos;
+}
+
+static const struct seq_operations slab_debugfs_sops = {
+	.start = slab_debugfs_start,
+	.next = slab_debugfs_next,
+	.stop = slab_debugfs_stop,
+	.show = slab_debugfs_show,
+};
+
+static int slab_debug_trace_open(struct inode *inode, struct file *filep)
+{
+	struct kmem_cache_node *n;
+	enum track_item alloc;
+	int node;
+	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
+						sizeof(struct loc_track));
+	struct kmem_cache *s = file_inode(filep)->i_private;
+
+	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
+		alloc = TRACK_ALLOC;
+	else
+		alloc = TRACK_FREE;
+
+	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
+		return -ENOMEM;
+
+	/* Push back cpu slabs */
+	flush_all(s);
+
+	for_each_kmem_cache_node(s, node, n) {
+		unsigned long flags;
+		struct page *page;
+
+		if (!atomic_long_read(&n->nr_slabs))
+			continue;
+
+		spin_lock_irqsave(&n->list_lock, flags);
+		list_for_each_entry(page, &n->partial, slab_list)
+			process_slab(t, s, page, alloc);
+		list_for_each_entry(page, &n->full, slab_list)
+			process_slab(t, s, page, alloc);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+	}
+
+	return 0;
+}
+
+static int slab_debug_trace_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct loc_track *t = seq->private;
+
+	free_loc_track(t);
+	return seq_release_private(inode, file);
+}
+
+static const struct file_operations slab_debugfs_fops = {
+	.open    = slab_debug_trace_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = slab_debug_trace_release,
+};
+
+static void debugfs_slab_add(struct kmem_cache *s)
+{
+	struct dentry *slab_cache_dir;
+
+	if (unlikely(!slab_debugfs_root))
+		return;
+
+	slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
+
+	debugfs_create_file("alloc_traces", 0400,
+		slab_cache_dir, s, &slab_debugfs_fops);
+
+	debugfs_create_file("free_traces", 0400,
+		slab_cache_dir, s, &slab_debugfs_fops);
+}
+
+void debugfs_slab_release(struct kmem_cache *s)
+{
+	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+}
+
+static int __init slab_debugfs_init(void)
+{
+	struct kmem_cache *s;
+
+	slab_debugfs_root = debugfs_create_dir("slab", NULL);
+
+	list_for_each_entry(s, &slab_caches, list)
+		if (s->flags & SLAB_STORE_USER)
+			debugfs_slab_add(s);
+
+	return 0;
+}
+__initcall(slab_debugfs_init);
+#endif
+
 /*
  * The /proc/slabinfo ABI
  */
...