Commit 8b1e1363 authored by Robert Richter, committed by Ingo Molnar

perf/x86-ibs: Fix usage of IBS op current count

The value of IbsOpCurCnt rolls over when it reaches IbsOpMaxCnt. Thus,
it is reset to zero by hardware. To get the correct count we need to
add the max count to it in case we received an ibs sample (valid bit
set).
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1333390758-10893-13-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fc5fb2b5
...@@ -286,7 +286,15 @@ static u64 get_ibs_fetch_count(u64 config) ...@@ -286,7 +286,15 @@ static u64 get_ibs_fetch_count(u64 config)
/*
 * Return the op count for an IBS op sample, read from the IbsOpCtl
 * register image in @config.
 *
 * Per the commit message: IbsOpCurCnt rolls over (is reset to zero by
 * hardware) when it reaches IbsOpMaxCnt, so when the sample is valid
 * (IBS_OP_VAL set) the max count must be added back to obtain the
 * true count.  The current-count field itself is only present on cpus
 * advertising the RDWROPCNT capability.
 */
static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	/* cnt rolled over: add max count; IbsOpMaxCnt is stored >> 4 */
	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4;

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
static void static void
...@@ -295,7 +303,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event, ...@@ -295,7 +303,12 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
{ {
u64 count = perf_ibs->get_count(*config); u64 count = perf_ibs->get_count(*config);
while (!perf_event_try_update(event, count, 20)) { /*
* Set width to 64 since we do not overflow on max width but
* instead on max count. In perf_ibs_set_period() we clear
* prev count manually on overflow.
*/
while (!perf_event_try_update(event, count, 64)) {
rdmsrl(event->hw.config_base, *config); rdmsrl(event->hw.config_base, *config);
count = perf_ibs->get_count(*config); count = perf_ibs->get_count(*config);
} }
...@@ -374,6 +387,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags) ...@@ -374,6 +387,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE) if (hwc->state & PERF_HES_UPTODATE)
return; return;
/*
* Clear valid bit to not count rollovers on update, rollovers
* are only updated in the irq handler.
*/
config &= ~perf_ibs->valid_mask;
perf_ibs_event_update(perf_ibs, event, &config); perf_ibs_event_update(perf_ibs, event, &config);
hwc->state |= PERF_HES_UPTODATE; hwc->state |= PERF_HES_UPTODATE;
} }
...@@ -488,17 +507,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) ...@@ -488,17 +507,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
if (!(*buf++ & perf_ibs->valid_mask)) if (!(*buf++ & perf_ibs->valid_mask))
return 0; return 0;
/*
* Emulate IbsOpCurCnt in MSRC001_1033 (IbsOpCtl), not
* supported in all cpus. As this triggered an interrupt, we
* set the current count to the max count.
*/
config = &ibs_data.regs[0]; config = &ibs_data.regs[0];
if (perf_ibs == &perf_ibs_op && !(ibs_caps & IBS_CAPS_RDWROPCNT)) {
*config &= ~IBS_OP_CUR_CNT;
*config |= (*config & IBS_OP_MAX_CNT) << 36;
}
perf_ibs_event_update(perf_ibs, event, config); perf_ibs_event_update(perf_ibs, event, config);
perf_sample_data_init(&data, 0, hwc->last_period); perf_sample_data_init(&data, 0, hwc->last_period);
if (!perf_ibs_set_period(perf_ibs, hwc, &period)) if (!perf_ibs_set_period(perf_ibs, hwc, &period))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment