Commit 1c10adbb authored by Oleg Nesterov's avatar Oleg Nesterov Committed by Ingo Molnar

hw_breakpoint: Introduce cpumask_of_bp()

Add the trivial helper which simply returns cpumask_of() or
cpu_possible_mask depending on bp->cpu.

Change fetch_bp_busy_slots() and toggle_bp_slot() to always do
for_each_cpu(cpumask_of_bp) to simplify the code and avoid the
code duplication.
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7ab71f32
@@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	return count;
 }
 
/*
 * Map a breakpoint event to the set of CPUs it occupies a slot on:
 * the single CPU it is bound to, or every possible CPU for a
 * task-bound (per-thread) breakpoint that may run anywhere.
 */
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	/* bp->cpu < 0 means the event is not pinned to one CPU. */
	if (bp->cpu < 0)
		return cpu_possible_mask;

	return cpumask_of(bp->cpu);
}
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
@@ -135,25 +142,13 @@ static void
 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		    enum bp_type_idx type)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
-
-	if (cpu >= 0) {
-		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
-			slots->pinned += max_task_bp_pinned(cpu, type);
-		else
-			slots->pinned += task_bp_pinned(cpu, bp, type);
-		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
-		return;
-	}
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
-	for_each_possible_cpu(cpu) {
-		unsigned int nr;
-
-		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
-		if (!tsk)
+	for_each_cpu(cpu, cpumask) {
+		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+
+		if (!bp->hw.bp_target)
 			nr += max_task_bp_pinned(cpu, type);
 		else
 			nr += task_bp_pinned(cpu, bp, type);
@@ -205,25 +200,21 @@ static void
 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	       int weight)
 {
-	int cpu = bp->cpu;
-	struct task_struct *tsk = bp->hw.bp_target;
+	const struct cpumask *cpumask = cpumask_of_bp(bp);
+	int cpu;
 
 	if (!enable)
 		weight = -weight;
 
 	/* Pinned counter cpu profiling */
-	if (!tsk) {
-		per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
+	if (!bp->hw.bp_target) {
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
 		return;
 	}
 
 	/* Pinned counter task profiling */
-	if (cpu >= 0) {
-		toggle_bp_task_slot(bp, cpu, type, weight);
-	} else {
-		for_each_possible_cpu(cpu)
-			toggle_bp_task_slot(bp, cpu, type, weight);
-	}
+	for_each_cpu(cpu, cpumask)
+		toggle_bp_task_slot(bp, cpu, type, weight);
 
 	if (enable)
 		list_add_tail(&bp->hw.bp_list, &bp_task_head);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment