Commit c66a36af authored by Namhyung Kim, committed by Arnaldo Carvalho de Melo

perf lock contention: Do not use BPF task local storage

It caused some trouble when a lock inside kmalloc is contended,
because task local storage would allocate memory using kmalloc.
That creates a recursion and even crashed my system.

There could be a couple of workarounds, but I think the simplest
one is to use a pre-allocated hash map.  We could fix task local
storage to use the safe BPF allocator, but that will take time,
so let's change this until that actually happens.
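
For reference, the tstamp map after this change is a plain hash map
keyed by the thread id.  Because BPF_MAP_TYPE_HASH pre-allocates its
elements at map creation time (when BPF_F_NO_PREALLOC is not set),
updating it from the contention tracepoints does not call back into
kmalloc.  A minimal sketch, assuming struct tstamp_data and
MAX_ENTRIES as already defined in the lock contention BPF skeleton:

	/* sketch: pre-allocated per-thread timestamp map, keyed by tid */
	struct {
		__uint(type, BPF_MAP_TYPE_HASH);	/* prealloc'd: no BPF_F_NO_PREALLOC */
		__type(key, int);			/* lower 32 bits of pid_tgid */
		__type(value, struct tstamp_data);
		__uint(max_entries, MAX_ENTRIES);
	} tstamp SEC(".maps");

Userspace additionally sizes it with bpf_map__set_max_entries()
before loading the skeleton, as in the first hunk below.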
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Blake Jones <blakejones@google.com>
Cc: Chris Li <chriscli@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20221118190109.1512674-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 7b7c22cc
@@ -39,6 +39,7 @@ int lock_contention_prepare(struct lock_contention *con)
 	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
 	bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
 	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
+	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);
 
 	if (target__has_cpu(target))
 		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
@@ -40,10 +40,10 @@ struct {
 
 /* maintain timestamp at the beginning of contention */
 struct {
-	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
-	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__uint(type, BPF_MAP_TYPE_HASH);
 	__type(key, int);
 	__type(value, struct tstamp_data);
+	__uint(max_entries, MAX_ENTRIES);
 } tstamp SEC(".maps");
 
 /* actual lock contention statistics */
@@ -103,18 +103,28 @@ static inline int can_record(void)
 SEC("tp_btf/contention_begin")
 int contention_begin(u64 *ctx)
 {
-	struct task_struct *curr;
+	__u32 pid;
 	struct tstamp_data *pelem;
 
 	if (!enabled || !can_record())
 		return 0;
 
-	curr = bpf_get_current_task_btf();
-	pelem = bpf_task_storage_get(&tstamp, curr, NULL,
-				     BPF_LOCAL_STORAGE_GET_F_CREATE);
-	if (!pelem || pelem->lock)
+	pid = bpf_get_current_pid_tgid();
+	pelem = bpf_map_lookup_elem(&tstamp, &pid);
+	if (pelem && pelem->lock)
 		return 0;
 
+	if (pelem == NULL) {
+		struct tstamp_data zero = {};
+
+		bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);
+		pelem = bpf_map_lookup_elem(&tstamp, &pid);
+		if (pelem == NULL) {
+			lost++;
+			return 0;
+		}
+	}
+
 	pelem->timestamp = bpf_ktime_get_ns();
 	pelem->lock = (__u64)ctx[0];
 	pelem->flags = (__u32)ctx[1];
@@ -128,7 +138,7 @@ int contention_begin(u64 *ctx)
 SEC("tp_btf/contention_end")
 int contention_end(u64 *ctx)
 {
-	struct task_struct *curr;
+	__u32 pid;
 	struct tstamp_data *pelem;
 	struct contention_key key;
 	struct contention_data *data;
@@ -137,8 +147,8 @@ int contention_end(u64 *ctx)
 	if (!enabled)
 		return 0;
 
-	curr = bpf_get_current_task_btf();
-	pelem = bpf_task_storage_get(&tstamp, curr, NULL, 0);
+	pid = bpf_get_current_pid_tgid();
+	pelem = bpf_map_lookup_elem(&tstamp, &pid);
 	if (!pelem || pelem->lock != ctx[0])
 		return 0;
@@ -156,7 +166,7 @@ int contention_end(u64 *ctx)
 		};
 
 		bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
-		pelem->lock = 0;
+		bpf_map_delete_elem(&tstamp, &pid);
 		return 0;
 	}
@@ -169,7 +179,7 @@ int contention_end(u64 *ctx)
 	if (data->min_time > duration)
 		data->min_time = duration;
 
-	pelem->lock = 0;
+	bpf_map_delete_elem(&tstamp, &pid);
 	return 0;
 }