Commit 9e54dd8b authored by Kent Overstreet, committed by Andrew Morton

rhashtable: plumb through alloc tag

This gives better memory allocation profiling results; rhashtable
allocations will be accounted to the code that initialized the rhashtable.

[surenb@google.com: undo _noprof additions in the documentation]
  Link: https://lkml.kernel.org/r/20240326231453.1206227-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-32-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 88ae5fb7
...@@ -152,6 +152,8 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) ...@@ -152,6 +152,8 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
ref->ct = NULL; ref->ct = NULL;
} }
#define alloc_tag_record(p) ((p) = current->alloc_tag)
#else /* CONFIG_MEM_ALLOC_PROFILING */ #else /* CONFIG_MEM_ALLOC_PROFILING */
#define DEFINE_ALLOC_TAG(_alloc_tag) #define DEFINE_ALLOC_TAG(_alloc_tag)
...@@ -159,6 +161,7 @@ static inline bool mem_alloc_profiling_enabled(void) { return false; } ...@@ -159,6 +161,7 @@ static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
size_t bytes) {} size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p) do {} while (0)
#endif /* CONFIG_MEM_ALLOC_PROFILING */ #endif /* CONFIG_MEM_ALLOC_PROFILING */
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#ifndef _LINUX_RHASHTABLE_TYPES_H #ifndef _LINUX_RHASHTABLE_TYPES_H
#define _LINUX_RHASHTABLE_TYPES_H #define _LINUX_RHASHTABLE_TYPES_H
#include <linux/alloc_tag.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/mutex.h> #include <linux/mutex.h>
...@@ -88,6 +89,9 @@ struct rhashtable { ...@@ -88,6 +89,9 @@ struct rhashtable {
struct mutex mutex; struct mutex mutex;
spinlock_t lock; spinlock_t lock;
atomic_t nelems; atomic_t nelems;
#ifdef CONFIG_MEM_ALLOC_PROFILING
struct alloc_tag *alloc_tag;
#endif
}; };
/** /**
...@@ -127,9 +131,12 @@ struct rhashtable_iter { ...@@ -127,9 +131,12 @@ struct rhashtable_iter {
bool end_of_table; bool end_of_table;
}; };
int rhashtable_init(struct rhashtable *ht, int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params); const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt, #define rhashtable_init(...) alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
int rhltable_init_noprof(struct rhltable *hlt,
const struct rhashtable_params *params); const struct rhashtable_params *params);
#define rhltable_init(...) alloc_hooks(rhltable_init_noprof(__VA_ARGS__))
#endif /* _LINUX_RHASHTABLE_TYPES_H */ #endif /* _LINUX_RHASHTABLE_TYPES_H */
...@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, ...@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl) if (ntbl)
return ntbl; return ntbl;
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); ntbl = alloc_hooks_tag(ht->alloc_tag,
kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
if (ntbl && leaf) { if (ntbl && leaf) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
...@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, ...@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
size = sizeof(*tbl) + sizeof(tbl->buckets[0]); size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
tbl = kzalloc(size, gfp); tbl = alloc_hooks_tag(ht->alloc_tag,
kmalloc_noprof(size, gfp|__GFP_ZERO));
if (!tbl) if (!tbl)
return NULL; return NULL;
...@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, ...@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i; int i;
static struct lock_class_key __key; static struct lock_class_key __key;
tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); tbl = alloc_hooks_tag(ht->alloc_tag,
kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets; size = nbuckets;
...@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) ...@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .obj_hashfn = my_hash_fn, * .obj_hashfn = my_hash_fn,
* }; * };
*/ */
int rhashtable_init(struct rhashtable *ht, int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params) const struct rhashtable_params *params)
{ {
struct bucket_table *tbl; struct bucket_table *tbl;
...@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht, ...@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht,
spin_lock_init(&ht->lock); spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params)); memcpy(&ht->p, params, sizeof(*params));
alloc_tag_record(ht->alloc_tag);
if (params->min_size) if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size); ht->p.min_size = roundup_pow_of_two(params->min_size);
...@@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht, ...@@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht,
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(rhashtable_init); EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
/** /**
* rhltable_init - initialize a new hash list table * rhltable_init - initialize a new hash list table
...@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init); ...@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
* *
* See documentation for rhashtable_init. * See documentation for rhashtable_init.
*/ */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{ {
int err; int err;
err = rhashtable_init(&hlt->ht, params); err = rhashtable_init_noprof(&hlt->ht, params);
hlt->ht.rhlist = true; hlt->ht.rhlist = true;
return err; return err;
} }
EXPORT_SYMBOL_GPL(rhltable_init); EXPORT_SYMBOL_GPL(rhltable_init_noprof);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
void (*free_fn)(void *ptr, void *arg), void (*free_fn)(void *ptr, void *arg),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment