Commit cb2a0235 authored by Thomas Gleixner, committed by Borislav Petkov

x86/cr4: Sanitize CR4.PCE update

load_mm_cr4_irqsoff() is really a strange name for a function which has
only one purpose: Update the CR4.PCE bit depending on the perf state.

Rename it to cr4_update_pce_mm(), move it into the tlb code and provide a
function which can be invoked by the perf smp function calls.

Another step to remove exposure of cpu_tlbstate.

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092559.049499158@linutronix.de
parent d8f0b353
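
Background on the bit itself: CR4.PCE controls whether the RDPMC instruction may be executed outside ring 0. When perf has granted a task's mm direct counter access (tracked in mm->context.perf_rdpmc_allowed, or forced by the "always available" rdpmc mode), user space can read a counter without a system call. A minimal, hypothetical user-space sketch of such a read, assuming the counter index is already known (a real consumer would take it from the event's mmap'd perf_event_mmap_page):

#include <stdint.h>

/*
 * Illustrative sketch only: read a performance counter with RDPMC.
 * This works in user space only while CR4.PCE is set on the executing
 * CPU, which is exactly the decision cr4_update_pce_mm() makes below.
 */
static inline uint64_t read_pmc(uint32_t counter)
{
	uint32_t lo, hi;

	/* RDPMC: counter selected by ECX, result returned in EDX:EAX. */
	asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
	return ((uint64_t)hi << 32) | lo;
}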
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2162,11 +2162,6 @@ static int x86_pmu_event_init(struct perf_event *event)
 	return err;
 }
 
-static void refresh_pce(void *ignored)
-{
-	load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
-}
-
 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 {
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2185,7 +2180,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	lockdep_assert_held_write(&mm->mmap_sem);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
@@ -2195,7 +2190,7 @@ static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
 		return;
 
 	if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed))
-		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
+		on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
 }
 
 static int x86_pmu_event_idx(struct perf_event *event)
@@ -2253,7 +2248,7 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
 		else if (x86_pmu.attr_rdpmc == 2)
 			static_branch_dec(&rdpmc_always_available_key);
 
-		on_each_cpu(refresh_pce, NULL, 1);
+		on_each_cpu(cr4_update_pce, NULL, 1);
 
 		x86_pmu.attr_rdpmc = val;
 	}
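
The set_attr_rdpmc() hunk above is the handler for the PMU's rdpmc sysfs attribute: writing 0, 1 or 2 selects never available, available to tasks with an rdpmc-enabled perf mapping, or always available, and the on_each_cpu() call makes every CPU re-evaluate CR4.PCE under the new policy. A hypothetical snippet switching to the "always available" mode; the sysfs path given is the usual location of the core PMU's attribute and is an assumption here:

#include <stdio.h>

int main(void)
{
	/* Assumed path of the core PMU's rdpmc attribute. */
	FILE *f = fopen("/sys/devices/cpu/rdpmc", "w");

	if (!f)
		return 1;
	/* 2 == RDPMC always allowed in user space (typically needs root). */
	fputs("2\n", f);
	return fclose(f) ? 1 : 0;
}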
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -24,21 +24,9 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 #endif /* !CONFIG_PARAVIRT_XXL */
 
 #ifdef CONFIG_PERF_EVENTS
-
 DECLARE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);
-
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
-{
-	if (static_branch_unlikely(&rdpmc_always_available_key) ||
-	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
-	     atomic_read(&mm->context.perf_rdpmc_allowed)))
-		cr4_set_bits_irqsoff(X86_CR4_PCE);
-	else
-		cr4_clear_bits_irqsoff(X86_CR4_PCE);
-}
-#else
-static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
+void cr4_update_pce(void *ignored);
 #endif
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -272,6 +272,26 @@ static void cond_ibpb(struct task_struct *next)
 	}
 }
 
+#ifdef CONFIG_PERF_EVENTS
+static inline void cr4_update_pce_mm(struct mm_struct *mm)
+{
+	if (static_branch_unlikely(&rdpmc_always_available_key) ||
+	    (!static_branch_unlikely(&rdpmc_never_available_key) &&
+	     atomic_read(&mm->context.perf_rdpmc_allowed)))
+		cr4_set_bits_irqsoff(X86_CR4_PCE);
+	else
+		cr4_clear_bits_irqsoff(X86_CR4_PCE);
+}
+
+void cr4_update_pce(void *ignored)
+{
+	cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm));
+}
+
+#else
+static inline void cr4_update_pce_mm(struct mm_struct *mm) { }
+#endif
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
@@ -440,7 +460,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
 
 	if (next != real_prev) {
-		load_mm_cr4_irqsoff(next);
+		cr4_update_pce_mm(next);
 		switch_ldt(real_prev, next);
 	}
 }