Commit 16db2839 authored by Marco Elver, committed by Peter Zijlstra

perf/hw_breakpoint: Introduce bp_slots_histogram

Factor out the existing `atomic_t count[N]` into its own struct called
'bp_slots_histogram', to generalize and make its intent clearer in
preparation for reuse elsewhere. The basic idea of bucketing "total
uses of N slots" resembles a histogram, so calling it such seems most
intuitive.

No functional change.
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-13-elver@google.com
parent 0912037f
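
For illustration, a minimal user-space sketch of the bucketing idea behind bp_slots_histogram: count[i] holds how many tasks currently pin i+1 slots, an update moves a task from its old bucket to a new one, and the maximum pinned count is the highest non-empty bucket. Plain ints stand in for atomic_t, locking and the KCSAN annotations are omitted, and the names (demo_histogram, hist_add, hist_max, DEMO_SLOTS) are hypothetical stand-ins for the kernel helpers added below.

/*
 * Toy model: count[i] is the number of tasks that currently pin i+1
 * breakpoint slots (a histogram over "slots pinned per task").
 */
#include <stdio.h>

#define DEMO_SLOTS 4

struct demo_histogram {
        int count[DEMO_SLOTS];
};

/* A task that pinned "old" slots now pins "old + val" slots. */
static void hist_add(struct demo_histogram *hist, int old, int val)
{
        int old_idx = old - 1;
        int new_idx = old_idx + val;

        if (old_idx >= 0)
                hist->count[old_idx]--; /* leave the old bucket */
        if (new_idx >= 0)
                hist->count[new_idx]++; /* enter the new bucket */
}

/* Highest non-empty bucket + 1 == most slots pinned by any single task. */
static int hist_max(struct demo_histogram *hist)
{
        for (int i = DEMO_SLOTS - 1; i >= 0; i--) {
                if (hist->count[i] > 0)
                        return i + 1;
        }
        return 0;
}

int main(void)
{
        struct demo_histogram hist = { { 0 } };

        hist_add(&hist, 0, 1); /* task A: 0 -> 1 pinned slot  */
        hist_add(&hist, 0, 1); /* task B: 0 -> 1 pinned slot  */
        hist_add(&hist, 1, 2); /* task A: 1 -> 3 pinned slots */

        printf("max pinned by any task: %d\n", hist_max(&hist)); /* prints 3 */
        return 0;
}
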
@@ -36,19 +36,27 @@
 #include <linux/slab.h>
 
 /*
- * Constraints data
+ * Datastructure to track the total uses of N slots across tasks or CPUs;
+ * bp_slots_histogram::count[N] is the number of assigned N+1 breakpoint slots.
  */
-struct bp_cpuinfo {
-	/* Number of pinned cpu breakpoints in a cpu */
-	unsigned int cpu_pinned;
-	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
+struct bp_slots_histogram {
 #ifdef hw_breakpoint_slots
-	atomic_t tsk_pinned[hw_breakpoint_slots(0)];
+	atomic_t count[hw_breakpoint_slots(0)];
 #else
-	atomic_t *tsk_pinned;
+	atomic_t *count;
 #endif
 };
 
+/*
+ * Per-CPU constraints data.
+ */
+struct bp_cpuinfo {
+	/* Number of pinned CPU breakpoints in a CPU. */
+	unsigned int cpu_pinned;
+	/* Histogram of pinned task breakpoints in a CPU. */
+	struct bp_slots_histogram tsk_pinned;
+};
+
 static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
 
 static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
@@ -159,6 +167,18 @@ static inline int hw_breakpoint_slots_cached(int type)
 	return __nr_bp_slots[type];
 }
 
+static __init bool
+bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
+{
+	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
+	return hist->count;
+}
+
+static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
+{
+	kfree(hist->count);
+}
+
 static __init int init_breakpoint_slots(void)
 {
 	int i, cpu, err_cpu;
@@ -170,8 +190,7 @@ static __init int init_breakpoint_slots(void)
 		for (i = 0; i < TYPE_MAX; i++) {
 			struct bp_cpuinfo *info = get_bp_info(cpu, i);
 
-			info->tsk_pinned = kcalloc(__nr_bp_slots[i], sizeof(atomic_t), GFP_KERNEL);
-			if (!info->tsk_pinned)
+			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
 				goto err;
 		}
 	}
@@ -180,7 +199,7 @@ static __init int init_breakpoint_slots(void)
 err:
 	for_each_possible_cpu(err_cpu) {
 		for (i = 0; i < TYPE_MAX; i++)
-			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
+			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
 		if (err_cpu == cpu)
 			break;
 	}
@@ -189,6 +208,34 @@ static __init int init_breakpoint_slots(void)
 }
 #endif
 
+static inline void
+bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
+{
+	const int old_idx = old - 1;
+	const int new_idx = old_idx + val;
+
+	if (old_idx >= 0)
+		WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);
+	if (new_idx >= 0)
+		WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);
+}
+
+static int
+bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
+{
+	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
+		const int count = atomic_read(&hist->count[i]);
+
+		/* Catch unexpected writers; we want a stable snapshot. */
+		ASSERT_EXCLUSIVE_WRITER(hist->count[i]);
+		if (count > 0)
+			return i + 1;
+		WARN(count < 0, "inconsistent breakpoint slots histogram");
+	}
+
+	return 0;
+}
+
 #ifndef hw_breakpoint_weight
 static inline int hw_breakpoint_weight(struct perf_event *bp)
 {
@@ -205,13 +252,11 @@ static inline enum bp_type_idx find_slot_idx(u64 bp_type)
 }
 
 /*
- * Report the maximum number of pinned breakpoints a task
- * have in this cpu
+ * Return the maximum number of pinned breakpoints a task has in this CPU.
  */
 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
-	atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
-	int i;
+	struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;
 
 	/*
 	 * At this point we want to have acquired the bp_cpuinfo_sem as a
@@ -219,14 +264,7 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	 * toggle_bp_task_slot() to tsk_pinned, and we get a stable snapshot.
 	 */
 	lockdep_assert_held_write(&bp_cpuinfo_sem);
-
-	for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
-		ASSERT_EXCLUSIVE_WRITER(tsk_pinned[i]);	/* Catch unexpected writers. */
-		if (atomic_read(&tsk_pinned[i]) > 0)
-			return i + 1;
-	}
-
-	return 0;
+	return bp_slots_histogram_max(tsk_pinned, type);
 }
 
 /*
@@ -300,8 +338,7 @@ max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
 				enum bp_type_idx type, int weight)
 {
-	atomic_t *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
-	int old_idx, new_idx;
+	struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;
 
 	/*
 	 * If bp->hw.target, tsk_pinned is only modified, but not used
@@ -311,14 +348,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
 	 * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
 	 */
 	lockdep_assert_held_read(&bp_cpuinfo_sem);
-
-	old_idx = task_bp_pinned(cpu, bp, type) - 1;
-	new_idx = old_idx + weight;
-
-	if (old_idx >= 0)
-		atomic_dec(&tsk_pinned[old_idx]);
-	if (new_idx >= 0)
-		atomic_inc(&tsk_pinned[new_idx]);
+	bp_slots_histogram_add(tsk_pinned, task_bp_pinned(cpu, bp, type), weight);
 }
 
 /*
@@ -768,7 +798,7 @@ bool hw_breakpoint_is_used(void)
 			return true;
 
 		for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
-			if (atomic_read(&info->tsk_pinned[slot]))
+			if (atomic_read(&info->tsk_pinned.count[slot]))
 				return true;
 		}
 	}