Commit 55de0f2e authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: rename intel only functions

[ Impact: cleanup ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-13-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 26816c28
@@ -725,7 +725,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
  * Save and restart an expired counter. Called by NMI contexts,
  * so it has to be careful about preempting normal counter ops:
  */
-static void perf_save_and_restart(struct perf_counter *counter)
+static void intel_pmu_save_and_restart(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	int idx = hwc->idx;
@@ -753,7 +753,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 	int ret = 0;
 
-	cpuc->throttle_ctrl = hw_perf_save_disable();
+	cpuc->throttle_ctrl = intel_pmu_save_disable_all();
 
 	status = intel_pmu_get_status(cpuc->throttle_ctrl);
 	if (!status)
@@ -770,7 +770,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		if (!counter)
 			continue;
 
-		perf_save_and_restart(counter);
+		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
 			__x86_pmu_disable(counter, &counter->hw, bit);
 	}
@@ -788,7 +788,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
 	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
-		hw_perf_restore(cpuc->throttle_ctrl);
+		intel_pmu_restore_all(cpuc->throttle_ctrl);
 
 	return ret;
 }
...
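
Note on the naming convention: helpers that touch Intel-only PMU state gain an intel_pmu_ prefix (intel_pmu_save_and_restart, intel_pmu_save_disable_all, intel_pmu_restore_all), while generic entry points keep the x86_pmu_ prefix. The sketch below is a minimal userspace illustration (not kernel code) of the vendor-dispatch pattern this rename prepares for; the struct pmu_ops name, the ops-table layout, and the printf bodies are illustrative assumptions, not the kernel's API (later patches in this series introduce a struct x86_pmu for this role).

#include <stdio.h>

/* Illustrative ops table; stands in for the kernel's later struct x86_pmu. */
struct pmu_ops {
	unsigned long long (*save_disable_all)(void);
	void (*restore_all)(unsigned long long ctrl);
};

/* Intel-only helpers carry the intel_pmu_ prefix after this patch: */
static unsigned long long intel_pmu_save_disable_all(void)
{
	/* Pretend to read and clear a global-control register. */
	printf("intel: disable all counters, save old ctrl\n");
	return 0x3ULL;
}

static void intel_pmu_restore_all(unsigned long long ctrl)
{
	printf("intel: restore ctrl to 0x%llx\n", ctrl);
}

static const struct pmu_ops intel_pmu = {
	.save_disable_all = intel_pmu_save_disable_all,
	.restore_all      = intel_pmu_restore_all,
};

int main(void)
{
	/* Generic code calls through the table, never the intel_* names: */
	unsigned long long ctrl = intel_pmu.save_disable_all();
	intel_pmu.restore_all(ctrl);
	return 0;
}

With vendor-specific work behind function pointers, an AMD implementation can later be slotted in without touching the generic interrupt-handling path.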