Commit 1229735b authored by Thomas Gleixner, committed by Ingo Molnar

perf/x86/intel/uncore: Make code more readable

Clean up the code a bit before reworking it completely.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Harish Chegondi <harish.chegondi@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20160222221011.204771538@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a46195f1
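
The recurring cleanup in this patch replaces indexed walks over NULL-terminated pointer arrays with a moving cursor, as in uncore_types_exit(), uncore_change_context() and uncore_msr_pmus_register() below. A minimal standalone sketch of the idiom; the type and helper names here are illustrative stand-ins, not the kernel's:

	#include <stdio.h>

	/* Illustrative stand-in for struct intel_uncore_type. */
	struct uncore_type { const char *name; };

	/* Hypothetical per-type teardown, standing in for uncore_type_exit(). */
	static void type_exit(struct uncore_type *type)
	{
		printf("exit %s\n", type->name);
	}

	int main(void)
	{
		struct uncore_type msr = { "msr" }, pci = { "pci" };
		/* NULL-terminated array, like uncore_msr_uncores in the kernel. */
		struct uncore_type *types[] = { &msr, &pci, NULL };

		/* Before: for (i = 0; types[i]; i++) uncore_type_exit(types[i]);
		 * After:  advance the cursor itself; the NULL sentinel ends the walk. */
		for (struct uncore_type **t = types; *t; t++)
			type_exit(*t);

		return 0;
	}

Advancing the pointer drops the index variable and makes the termination condition (the NULL sentinel) explicit in the loop header.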
@@ -217,7 +217,8 @@ u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
 	return config;
 }
 
-static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+				   struct perf_event *event, int idx)
 {
 	struct hw_perf_event *hwc = &event->hw;
 
@@ -312,18 +313,19 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
 	box->hrtimer.function = uncore_pmu_hrtimer;
 }
 
-static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
+static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
+						 int node)
 {
+	int i, size, numshared = type->num_shared_regs ;
 	struct intel_uncore_box *box;
-	int i, size;
 
-	size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
+	size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
 
 	box = kzalloc_node(size, GFP_KERNEL, node);
 	if (!box)
 		return NULL;
 
-	for (i = 0; i < type->num_shared_regs; i++)
+	for (i = 0; i < numshared; i++)
 		raw_spin_lock_init(&box->shared_regs[i].lock);
 
 	uncore_pmu_init_hrtimer(box);
@@ -351,7 +353,8 @@ static bool is_uncore_event(struct perf_event *event)
 }
 
 static int
-uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
+uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
+		      bool dogrp)
 {
 	struct perf_event *event;
 	int n, max_count;
@@ -412,7 +415,8 @@ uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *eve
 	return &type->unconstrainted;
 }
 
-static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
+static void uncore_put_event_constraint(struct intel_uncore_box *box,
+					struct perf_event *event)
 {
 	if (box->pmu->type->ops->put_constraint)
 		box->pmu->type->ops->put_constraint(box, event);
@@ -592,7 +596,7 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
 		if (event == box->event_list[i]) {
 			uncore_put_event_constraint(box, event);
 
-			while (++i < box->n_events)
+			for (++i; i < box->n_events; i++)
 				box->event_list[i - 1] = box->event_list[i];
 
 			--box->n_events;
@@ -801,10 +805,8 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
 
 static void __init uncore_types_exit(struct intel_uncore_type **types)
 {
-	int i;
-
-	for (i = 0; types[i]; i++)
-		uncore_type_exit(types[i]);
+	for (; *types; types++)
+		uncore_type_exit(*types);
 }
 
 static int __init uncore_type_init(struct intel_uncore_type *type)
@@ -908,9 +910,11 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 	 * some device types. Hence PCI device idx would be 0 for all devices.
 	 * So increment pmu pointer to point to an unused array element.
 	 */
-	if (boot_cpu_data.x86_model == 87)
+	if (boot_cpu_data.x86_model == 87) {
 		while (pmu->func_id >= 0)
 			pmu++;
+	}
+
 	if (pmu->func_id < 0)
 		pmu->func_id = pdev->devfn;
 	else
@@ -1170,44 +1174,45 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
 	return 0;
 }
 
-static void
-uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
+static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
+				   int new_cpu)
 {
-	struct intel_uncore_type *type;
-	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_pmu *pmu = type->pmus;
 	struct intel_uncore_box *box;
-	int i, j;
+	int i;
 
-	for (i = 0; uncores[i]; i++) {
-		type = uncores[i];
-		for (j = 0; j < type->num_boxes; j++) {
-			pmu = &type->pmus[j];
-			if (old_cpu < 0)
-				box = uncore_pmu_to_box(pmu, new_cpu);
-			else
-				box = uncore_pmu_to_box(pmu, old_cpu);
-			if (!box)
-				continue;
+	for (i = 0; i < type->num_boxes; i++, pmu++) {
+		if (old_cpu < 0)
+			box = uncore_pmu_to_box(pmu, new_cpu);
+		else
+			box = uncore_pmu_to_box(pmu, old_cpu);
+		if (!box)
+			continue;
 
-			if (old_cpu < 0) {
-				WARN_ON_ONCE(box->cpu != -1);
-				box->cpu = new_cpu;
-				continue;
-			}
+		if (old_cpu < 0) {
+			WARN_ON_ONCE(box->cpu != -1);
			box->cpu = new_cpu;
+			continue;
+		}
 
-			WARN_ON_ONCE(box->cpu != old_cpu);
-			if (new_cpu >= 0) {
-				uncore_pmu_cancel_hrtimer(box);
-				perf_pmu_migrate_context(&pmu->pmu,
-						old_cpu, new_cpu);
-				box->cpu = new_cpu;
-			} else {
-				box->cpu = -1;
-			}
-		}
+		WARN_ON_ONCE(box->cpu != old_cpu);
+		box->cpu = -1;
+		if (new_cpu < 0)
+			continue;
+
+		uncore_pmu_cancel_hrtimer(box);
+		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
+		box->cpu = new_cpu;
 	}
 }
 
+static void uncore_change_context(struct intel_uncore_type **uncores,
+				  int old_cpu, int new_cpu)
+{
+	for (; *uncores; uncores++)
+		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
+}
+
 static void uncore_event_exit_cpu(int cpu)
 {
 	int i, phys_id, target;
@@ -1318,8 +1323,8 @@ static int __init uncore_msr_pmus_register(void)
 	struct intel_uncore_type **types = uncore_msr_uncores;
 	int ret;
 
-	while (*types) {
-		ret = type_pmu_register(*types++);
+	for (; *types; types++) {
+		ret = type_pmu_register(*types);
 		if (ret)
 			return ret;
 	}
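The uncore_change_type_ctx() rewrite above also flattens the migrate-or-park branch: instead of nesting the hrtimer and migration calls inside an if/else on new_cpu, the new code parks the box unconditionally (box->cpu = -1) and bails out early with continue when there is no target CPU, leaving the migration path un-nested at the tail of the loop. A reduced, runnable sketch of that control-flow transformation; the types and helpers here are stand-ins for the kernel's, not its actual API:

	#include <stdio.h>

	struct box { int cpu; };

	/* Stand-ins for uncore_pmu_cancel_hrtimer() and perf_pmu_migrate_context(). */
	static void cancel_timer(struct box *b)
	{
		(void)b;
		printf("cancel hrtimer\n");
	}

	static void migrate(struct box *b, int from, int to)
	{
		(void)b;
		printf("migrate context %d -> %d\n", from, to);
	}

	/* After the patch: park each box unconditionally, then take an early
	 * continue when there is no new CPU; only the migration path remains
	 * at the end of the loop body. */
	static void change_ctx(struct box *boxes, int nboxes, int old_cpu, int new_cpu)
	{
		for (int i = 0; i < nboxes; i++) {
			struct box *b = &boxes[i];

			b->cpu = -1;		/* park the box */
			if (new_cpu < 0)
				continue;	/* CPU going away, nothing to adopt */

			cancel_timer(b);
			migrate(b, old_cpu, new_cpu);
			b->cpu = new_cpu;	/* box now serviced from the new CPU */
		}
	}

	int main(void)
	{
		struct box boxes[2] = { { 3 }, { 3 } };

		change_ctx(boxes, 2, 3, 5);	/* migrate both boxes from CPU 3 to CPU 5 */
		change_ctx(boxes, 2, 5, -1);	/* CPU 5 goes offline: park both boxes */
		return 0;
	}

Parking first and continuing early removes one level of indentation and makes the offline case the default, which is the same shape the patch gives the kernel function.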