Commit 0e0162df authored by Kan Liang, committed by Ingo Molnar

perf/x86/intel/uncore: Add infrastructure for free running counters

There are a number of free running counters introduced for uncore, which
provide highly valuable information to a wide array of customers.
However, the generic uncore code doesn't support them yet.

The free running counters will be specially handled based on their
unique attributes:

 - They are read-only. They cannot be enabled/disabled.

 - The event and the counter are always 1:1 mapped. The event does not need
   to be assigned a counter, nor tracked in event_list.

 - They are always active; there is no need to check their availability.

 - They have a different bit width from the other uncore counters.

Also, use inline helpers to replace the open-coded checks for the fixed and
free running counter indices.
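
For reference, these checks boil down to comparing the counter index against
the fixed and free running pseudo indices. A minimal sketch of what such
helpers look like (the actual definitions belong to the uncore.h side of this
series, which is not shown in the hunks below):

    static inline bool uncore_pmc_fixed(int idx)
    {
            return idx == UNCORE_PMC_IDX_FIXED;
    }

    static inline bool uncore_pmc_freerunning(int idx)
    {
            return idx == UNCORE_PMC_IDX_FREERUNNING;
    }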
Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: http://lkml.kernel.org/r/1525371913-10597-5-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 927b2deb
@@ -203,7 +203,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
 	hwc->idx = idx;
 	hwc->last_tag = ++box->tags[idx];
 
-	if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+	if (uncore_pmc_fixed(hwc->idx)) {
 		hwc->event_base = uncore_fixed_ctr(box);
 		hwc->config_base = uncore_fixed_ctl(box);
 		return;
@@ -218,7 +218,9 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
 	u64 prev_count, new_count, delta;
 	int shift;
 
-	if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
+	if (uncore_pmc_freerunning(event->hw.idx))
+		shift = 64 - uncore_freerunning_bits(box, event);
+	else if (uncore_pmc_fixed(event->hw.idx))
 		shift = 64 - uncore_fixed_ctr_bits(box);
 	else
 		shift = 64 - uncore_perf_ctr_bits(box);
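
The shift computed above exists because the counters are narrower than 64
bits: shifting both snapshots up to bit 63 before subtracting, then shifting
the difference back down, makes the delta wrap correctly at the real counter
width. A small illustration of that arithmetic (uncore_compute_delta() is a
hypothetical helper for explanation only, not part of this patch):

    /* Hypothetical illustration: shift = 64 - counter_bits. */
    static u64 uncore_compute_delta(u64 prev, u64 now, int shift)
    {
            u64 delta = (now << shift) - (prev << shift);

            return delta >> shift;
    }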
@@ -454,10 +456,25 @@ static void uncore_pmu_event_start(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	int idx = event->hw.idx;
 
-	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
 		return;
 
-	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+	/*
+	 * Free running counter is read-only and always active.
+	 * Use the current counter value as start point.
+	 * There is no overflow interrupt for free running counter.
+	 * Use hrtimer to periodically poll the counter to avoid overflow.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx)) {
+		list_add_tail(&event->active_entry, &box->active_list);
+		local64_set(&event->hw.prev_count,
+			    uncore_read_counter(box, event));
+		if (box->n_active++ == 0)
+			uncore_pmu_start_hrtimer(box);
+		return;
+	}
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
 		return;
 
 	event->hw.state = 0;
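
The comment in the hunk above leans on the existing hrtimer machinery: since a
free running counter has no overflow interrupt, the periodic timer simply
re-reads every active event often enough that a wrap cannot be missed.
A simplified sketch of that polling step (poll_freerunning_events() is a
hypothetical name for illustration; the real work happens in the box's
existing hrtimer callback, which is unchanged by this patch):

    /*
     * Simplified sketch, NOT the actual hrtimer callback body: each timer
     * tick re-reads every event on box->active_list so a narrow free
     * running counter is sampled before it can wrap more than once.
     */
    static void poll_freerunning_events(struct intel_uncore_box *box)
    {
            struct perf_event *event;

            list_for_each_entry(event, &box->active_list, active_entry)
                    uncore_perf_event_update(box, event);
    }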
@@ -479,6 +496,15 @@ static void uncore_pmu_event_stop(struct perf_event *event, int flags)
 	struct intel_uncore_box *box = uncore_event_to_box(event);
 	struct hw_perf_event *hwc = &event->hw;
 
+	/* Cannot disable free running counter which is read-only */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		list_del(&event->active_entry);
+		if (--box->n_active == 0)
+			uncore_pmu_cancel_hrtimer(box);
+		uncore_perf_event_update(box, event);
+		return;
+	}
+
 	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
 		uncore_disable_event(box, event);
 		box->n_active--;
@@ -512,6 +538,17 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags)
 	if (!box)
 		return -ENODEV;
 
+	/*
+	 * The free running counter is assigned in event_init().
+	 * The free running counter event and free running counter
+	 * are 1:1 mapped. It doesn't need to be tracked in event_list.
+	 */
+	if (uncore_pmc_freerunning(hwc->idx)) {
+		if (flags & PERF_EF_START)
+			uncore_pmu_event_start(event, 0);
+		return 0;
+	}
+
 	ret = n = uncore_collect_events(box, event, false);
 	if (ret < 0)
 		return ret;
@@ -570,6 +607,14 @@ static void uncore_pmu_event_del(struct perf_event *event, int flags)
 
 	uncore_pmu_event_stop(event, PERF_EF_UPDATE);
 
+	/*
+	 * The event for free running counter is not tracked by event_list.
+	 * It doesn't need to force event->hw.idx = -1 to reassign the counter.
+	 * Because the event and the free running counter are 1:1 mapped.
+	 */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return;
+
 	for (i = 0; i < box->n_events; i++) {
 		if (event == box->event_list[i]) {
 			uncore_put_event_constraint(box, event);
@@ -603,6 +648,10 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
 	struct intel_uncore_box *fake_box;
 	int ret = -EINVAL, n;
 
+	/* The free running counter is always active. */
+	if (uncore_pmc_freerunning(event->hw.idx))
+		return 0;
+
 	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
 	if (!fake_box)
 		return -ENOMEM;
@@ -690,6 +739,17 @@ static int uncore_pmu_event_init(struct perf_event *event)
 
 		/* fixed counters have event field hardcoded to zero */
 		hwc->config = 0ULL;
+	} else if (is_freerunning_event(event)) {
+		if (!check_valid_freerunning_event(box, event))
+			return -EINVAL;
+		event->hw.idx = UNCORE_PMC_IDX_FREERUNNING;
+		/*
+		 * The free running counter event and free running counter
+		 * are always 1:1 mapped.
+		 * The free running counter is always active.
+		 * Assign the free running counter here.
+		 */
+		event->hw.event_base = uncore_freerunning_counter(box, event);
 	} else {
 		hwc->config = event->attr.config &
 			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));