Commit e1ad1ac2 authored by Like Xu, committed by Peter Zijlstra

perf/x86: Keep LBR records unchanged in host context for guest usage

When a guest wants to use the LBR registers, its hypervisor creates a guest
LBR event and lets host perf schedule it. The LBR record MSRs are
accessible to the guest when its guest LBR event is scheduled in
by the perf subsystem.
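
As a rough illustration of that flow (a sketch only: the hypervisor side
belongs to the KVM work built on top of this series, and the helper name
here is hypothetical), the guest LBR event could be created along these
lines:

#include <linux/perf_event.h>

/*
 * Hypothetical helper: a pinned, host-excluded, counting event whose
 * fixed pseudo-encoding INTEL_FIXED_VLBR_EVENT matches vlbr_constraint
 * below, so it occupies the vLBR slot rather than a real counter.
 */
static struct perf_event *create_guest_lbr_event(void)
{
        struct perf_event_attr attr = {
                .type                   = PERF_TYPE_RAW,
                .size                   = sizeof(attr),
                .config                 = INTEL_FIXED_VLBR_EVENT,
                .pinned                 = true, /* must not be rotated out */
                .exclude_host           = true, /* active in guest mode only */
                .sample_type            = PERF_SAMPLE_BRANCH_STACK,
                .branch_sample_type     = PERF_SAMPLE_BRANCH_CALL_STACK |
                                          PERF_SAMPLE_BRANCH_USER,
        };

        /* A per-task event owned by the vCPU thread; no sampling buffer. */
        return perf_event_create_kernel_counter(&attr, -1, current,
                                                NULL, NULL);
}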

Before this event is scheduled out, we should avoid host changes to
IA32_DEBUGCTLMSR or LBR_SELECT. Otherwise, unexpected branch operations
may interfere with guest behavior, pollute the LBR records, and even leak
host branches to the guest. In addition, the host's LBR read operation
can be skipped as well.
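
For reference, the host is kept away from the vLBR slot through the
existing guest mask: intel_set_masks(), introduced by an earlier refactor
in this series and called from the core.c hunk below, records a
host-excluded event's index in intel_ctrl_guest_mask (a simplified sketch
from memory, not part of this patch):

static void intel_set_masks(struct perf_event *event, int idx)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (event->attr.exclude_host)
                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
        if (event->attr.exclude_guest)
                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
        if (event_is_checkpointed(event))
                __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
}

The new vlbr_exclude_host() helper below tests exactly that
INTEL_PMC_IDX_FIXED_VLBR bit, so the host skips its own LBR
enable/disable/read while the guest owns the registers.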

To ensure that guest LBR records are not lost across a context switch,
the guest LBR event enables callstack mode, which lets
intel_pmu_lbr_sched_task() save and restore any unread guest LBR records
naturally.
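
For context, a lightly simplified sketch of that context-switch hook
(based on intel_pmu_lbr_sched_task() in arch/x86/events/intel/lbr.c
around this kernel version):

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;

        if (!cpuc->lbr_users)
                return;

        /*
         * A callstack-mode event carries task_ctx_data: save the LBR
         * stack (and, with this patch, LBR_SELECT) on sched-out and
         * restore it on sched-in.
         */
        task_ctx = ctx ? ctx->task_ctx_data : NULL;
        if (task_ctx) {
                if (sched_in)
                        __intel_pmu_lbr_restore(task_ctx);
                else
                        __intel_pmu_lbr_save(task_ctx);
                return;
        }

        /*
         * Otherwise the records cannot be resolved in the new address
         * space, so wipe them on sched-in.
         */
        if (sched_in)
                intel_pmu_lbr_reset();
}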

However, the guest may change LBR_SELECT for its own use, and the host
LBR code doesn't save/restore it. To avoid losing the guest's LBR_SELECT
value while the guest LBR event is running, the vlbr_constraint is bound
to a new constraint flag, PERF_X86_EVENT_LBR_SELECT.
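
Going by the __EVENT_CONSTRAINT() definition in
arch/x86/events/perf_event.h, the new initializer in the last lbr.c hunk
expands to roughly this (a simplified expansion, not verbatim):

struct event_constraint vlbr_constraint = {
        { .idxmsk64 = 1ULL << INTEL_PMC_IDX_FIXED_VLBR }, /* only the vLBR slot */
        .code    = INTEL_FIXED_VLBR_EVENT,
        .cmask   = FIXED_EVENT_FLAGS, /* match event select + umask + flags */
        .weight  = 1,
        .overlap = 0,
        .flags   = PERF_X86_EVENT_LBR_SELECT,
};

which is what the old FIXED_EVENT_CONSTRAINT() produced, except that the
constraint now also carries the flag that intel_pmu_lbr_add() and
intel_pmu_lbr_del() test on the event.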

Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200514083054.62538-6-like.xu@linux.intel.com
parent 097e4311
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2189,7 +2189,8 @@ static void intel_pmu_disable_event(struct perf_event *event)
 	} else if (idx == INTEL_PMC_IDX_FIXED_BTS) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
-	}
+	} else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+		intel_clear_masks(event, idx);
 
 	/*
 	 * Needs to be called after x86_pmu_disable_event,
@@ -2271,7 +2272,8 @@ static void intel_pmu_enable_event(struct perf_event *event)
 		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
 		intel_pmu_enable_bts(hwc->config);
-	}
+	} else if (idx == INTEL_PMC_IDX_FIXED_VLBR)
+		intel_set_masks(event, idx);
 }
 
 static void intel_pmu_add_event(struct perf_event *event)
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -383,6 +383,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 
 	wrmsrl(x86_pmu.lbr_tos, tos);
 	task_ctx->lbr_stack_state = LBR_NONE;
+
+	if (cpuc->lbr_select)
+		wrmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 }
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
@@ -415,6 +418,9 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 
 	cpuc->last_task_ctx = task_ctx;
 	cpuc->last_log_id = ++task_ctx->log_id;
+
+	if (cpuc->lbr_select)
+		rdmsrl(MSR_LBR_SELECT, task_ctx->lbr_sel);
 }
 
 void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
@@ -485,6 +491,9 @@ void intel_pmu_lbr_add(struct perf_event *event)
 	if (!x86_pmu.lbr_nr)
 		return;
 
+	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
+		cpuc->lbr_select = 1;
+
 	cpuc->br_sel = event->hw.branch_reg.reg;
 
 	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
@@ -532,6 +541,9 @@ void intel_pmu_lbr_del(struct perf_event *event)
 		task_ctx->lbr_callstack_users--;
 	}
 
+	if (event->hw.flags & PERF_X86_EVENT_LBR_SELECT)
+		cpuc->lbr_select = 0;
+
 	if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
 		cpuc->lbr_pebs_users--;
 	cpuc->lbr_users--;
@@ -540,11 +552,19 @@ void intel_pmu_lbr_del(struct perf_event *event)
 	perf_sched_cb_dec(event->ctx->pmu);
 }
 
+static inline bool vlbr_exclude_host(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	return test_bit(INTEL_PMC_IDX_FIXED_VLBR,
+		(unsigned long *)&cpuc->intel_ctrl_guest_mask);
+}
+
 void intel_pmu_lbr_enable_all(bool pmi)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
-	if (cpuc->lbr_users)
+	if (cpuc->lbr_users && !vlbr_exclude_host())
 		__intel_pmu_lbr_enable(pmi);
 }
 
@@ -552,7 +572,7 @@ void intel_pmu_lbr_disable_all(void)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
-	if (cpuc->lbr_users)
+	if (cpuc->lbr_users && !vlbr_exclude_host())
 		__intel_pmu_lbr_disable();
 }
 
@@ -694,7 +714,8 @@ void intel_pmu_lbr_read(void)
 	 * This could be smarter and actually check the event,
 	 * but this simple approach seems to work for now.
	 */
-	if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
+	if (!cpuc->lbr_users || vlbr_exclude_host() ||
+	    cpuc->lbr_users == cpuc->lbr_pebs_users)
 		return;
 
 	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
@@ -1365,5 +1386,5 @@ int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
 EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
 
 struct event_constraint vlbr_constraint =
-	FIXED_EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT,
-			(INTEL_PMC_IDX_FIXED_VLBR - INTEL_PMC_IDX_FIXED));
+	__EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
+			  FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -78,6 +78,7 @@ static inline bool constraint_match(struct event_constraint *c, u64 ecode)
 #define PERF_X86_EVENT_LARGE_PEBS	0x0400 /* use large PEBS */
 #define PERF_X86_EVENT_PEBS_VIA_PT	0x0800 /* use PT buffer for PEBS */
 #define PERF_X86_EVENT_PAIR		0x1000 /* Large Increment per Cycle */
+#define PERF_X86_EVENT_LBR_SELECT	0x2000 /* Save/Restore MSR_LBR_SELECT */
 
 struct amd_nb {
 	int nb_id;  /* NorthBridge id */
@@ -237,6 +238,7 @@ struct cpu_hw_events {
 	u64				br_sel;
 	struct x86_perf_task_context	*last_task_ctx;
 	int				last_log_id;
+	int				lbr_select;
 
 	/*
 	 * Intel host/guest exclude bits
@@ -722,6 +724,7 @@ struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
 	u64 lbr_info[MAX_LBR_ENTRIES];
+	u64 lbr_sel;
 	int tos;
 	int valid_lbrs;
 	int lbr_callstack_users;