Commit 64b609d6 authored by Linus Torvalds

Merge tag 'perf-urgent-2020-11-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A set of fixes for perf:

    - A set of commits which reduce the stack usage of various perf
      event handling functions which allocated large data structs on
      the stack, causing stack overflows in the worst case

    - Use the proper mechanism for detecting soft interrupts in the
      recursion protection

    - Make the recursion protection simpler and more robust

    - Simplify the scheduling of event groups to make the code more
      robust and prepare for fixing the issues vs. scheduling of
      exclusive event groups

    - Prevent event multiplexing and rotation for exclusive event groups

    - Correct the perf event attribute exclusive semantics to take
      pinned events, e.g. the PMU watchdog, into account

    - Make the anythread filtering conditional for Intel's generic PMU
      counters as it is no longer guaranteed to be supported on newer
      CPUs. Check the corresponding CPUID leaf to make sure

    - Fixup a duplicate initialization in an array which was probably
      caused by the usual 'copy & paste - forgot to edit' mishap"

* tag 'perf-urgent-2020-11-15' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/intel/uncore: Fix Add BW copypasta
  perf/x86/intel: Make anythread filter support conditional
  perf: Tweak perf_event_attr::exclusive semantics
  perf: Fix event multiplexing for exclusive groups
  perf: Simplify group_sched_in()
  perf: Simplify group_sched_out()
  perf/x86: Make dummy_iregs static
  perf/arch: Remove perf_sample_data::regs_user_copy
  perf: Optimize get_recursion_context()
  perf: Fix get_recursion_context()
  perf/x86: Reduce stack usage for x86_pmu::drain_pebs()
  perf: Reduce stack usage of perf_output_begin()
parents d0a37fd5 1a8cfa24
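
The common thread in the stack-usage fixes below is that struct perf_sample_data, which is large, is no longer instantiated again and again on the stack of the output path; one caller-owned instance is passed down by pointer instead. A minimal standalone C sketch of that pattern, with hypothetical names rather than the kernel API:

/*
 * Hypothetical names, not the kernel API: a stand-in "sample" struct and
 * two helpers illustrating the before/after shape of the output path.
 */
#include <string.h>

struct big_sample { char blob[512]; };          /* stand-in for perf_sample_data */

/* old shape: every level keeps its own large copy on the stack */
static int output_begin_old(void)
{
        struct big_sample tmp;                  /* extra stack cost per nesting level */
        memset(&tmp, 0, sizeof(tmp));
        return (int)tmp.blob[0];
}

/* new shape: reuse the single copy the caller already initialized */
static int output_begin_new(struct big_sample *data)
{
        return (int)data->blob[0];
}

int emit_record(void)
{
        struct big_sample data;                 /* one instance for the whole call chain */
        memset(&data, 0, sizeof(data));
        return output_begin_old() + output_begin_new(&data);
}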
@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
...
@@ -73,8 +73,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
...
@@ -32,8 +32,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
...
@@ -1336,7 +1336,7 @@ static void dump_trace_imc_data(struct perf_event *event)
 		/* If this is a valid record, create the sample */
 		struct perf_output_handle handle;
 
-		if (perf_output_begin(&handle, event, header.size))
+		if (perf_output_begin(&handle, &data, event, header.size))
 			return;
 
 		perf_output_sample(&handle, &header, &data, event);
...
@@ -144,8 +144,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
...
@@ -36,8 +36,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
...
@@ -672,7 +672,7 @@ static void cpumsf_output_event_pid(struct perf_event *event,
 	rcu_read_lock();
 
 	perf_prepare_sample(&header, data, event, regs);
-	if (perf_output_begin(&handle, event, header.size))
+	if (perf_output_begin(&handle, data, event, header.size))
 		goto out;
 
 	/* Update the process ID (see also kernel/events/core.c) */
...
@@ -53,8 +53,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	/*
 	 * Use the regs from the first interruption and let
...
@@ -2630,7 +2630,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 		u64 pebs_enabled = cpuc->pebs_enabled;
 
 		handled++;
-		x86_pmu.drain_pebs(regs);
+		x86_pmu.drain_pebs(regs, &data);
 		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
 
 		/*
@@ -4987,6 +4987,12 @@ __init int intel_pmu_init(void)
 
 	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
 
+	if (version >= 5) {
+		x86_pmu.intel_cap.anythread_deprecated = edx.split.anythread_deprecated;
+		if (x86_pmu.intel_cap.anythread_deprecated)
+			pr_cont(" AnyThread deprecated, ");
+	}
+
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -5512,6 +5518,10 @@ __init int intel_pmu_init(void)
 		x86_pmu.intel_ctrl |=
 			((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
 
+	/* AnyThread may be deprecated on arch perfmon v5 or later */
+	if (x86_pmu.intel_cap.anythread_deprecated)
+		x86_pmu.format_attrs = intel_arch_formats_attr;
+
 	if (x86_pmu.event_constraints) {
 		/*
 		 * event on fixed counter2 (REF_CYCLES) only works on this
...
@@ -642,8 +642,8 @@ int intel_pmu_drain_bts_buffer(void)
 	rcu_read_lock();
 	perf_prepare_sample(&header, &data, event, &regs);
 
-	if (perf_output_begin(&handle, event, header.size *
-			      (top - base - skip)))
+	if (perf_output_begin(&handle, &data, event,
+			      header.size * (top - base - skip)))
 		goto unlock;
 
 	for (at = base; at < top; at++) {
@@ -670,7 +670,9 @@ int intel_pmu_drain_bts_buffer(void)
 
 static inline void intel_pmu_drain_pebs_buffer(void)
 {
-	x86_pmu.drain_pebs(NULL);
+	struct perf_sample_data data;
+
+	x86_pmu.drain_pebs(NULL, &data);
 }
 
 /*
@@ -1719,23 +1721,24 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
 	return 0;
 }
 
-static void __intel_pmu_pebs_event(struct perf_event *event,
-				   struct pt_regs *iregs,
-				   void *base, void *top,
-				   int bit, int count,
-				   void (*setup_sample)(struct perf_event *,
-						struct pt_regs *,
-						void *,
-						struct perf_sample_data *,
-						struct pt_regs *))
+static __always_inline void
+__intel_pmu_pebs_event(struct perf_event *event,
+		       struct pt_regs *iregs,
+		       struct perf_sample_data *data,
+		       void *base, void *top,
+		       int bit, int count,
+		       void (*setup_sample)(struct perf_event *,
+					    struct pt_regs *,
+					    void *,
+					    struct perf_sample_data *,
+					    struct pt_regs *))
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	struct perf_sample_data data;
 	struct x86_perf_regs perf_regs;
 	struct pt_regs *regs = &perf_regs.regs;
 	void *at = get_next_pebs_record_by_bit(base, top, bit);
-	struct pt_regs dummy_iregs;
+	static struct pt_regs dummy_iregs;
 
 	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
 		/*
@@ -1752,14 +1755,14 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 		iregs = &dummy_iregs;
 
 	while (count > 1) {
-		setup_sample(event, iregs, at, &data, regs);
-		perf_event_output(event, &data, regs);
+		setup_sample(event, iregs, at, data, regs);
+		perf_event_output(event, data, regs);
 		at += cpuc->pebs_record_size;
 		at = get_next_pebs_record_by_bit(at, top, bit);
 		count--;
 	}
 
-	setup_sample(event, iregs, at, &data, regs);
+	setup_sample(event, iregs, at, data, regs);
 	if (iregs == &dummy_iregs) {
 		/*
 		 * The PEBS records may be drained in the non-overflow context,
@@ -1767,18 +1770,18 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
 		 * last record the same as other PEBS records, and doesn't
 		 * invoke the generic overflow handler.
 		 */
-		perf_event_output(event, &data, regs);
+		perf_event_output(event, data, regs);
 	} else {
 		/*
 		 * All but the last records are processed.
 		 * The last one is left to be able to call the overflow handler.
 		 */
-		if (perf_event_overflow(event, &data, regs))
+		if (perf_event_overflow(event, data, regs))
 			x86_pmu_stop(event, 0);
 	}
 }
 
-static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
@@ -1812,7 +1815,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 		return;
 	}
 
-	__intel_pmu_pebs_event(event, iregs, at, top, 0, n,
+	__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
 			       setup_pebs_fixed_sample_data);
 }
 
@@ -1835,7 +1838,7 @@ static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int
 	}
 }
 
-static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
@@ -1942,14 +1945,14 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 		}
 
 		if (counts[bit]) {
-			__intel_pmu_pebs_event(event, iregs, base,
+			__intel_pmu_pebs_event(event, iregs, data, base,
 					       top, bit, counts[bit],
 					       setup_pebs_fixed_sample_data);
 		}
 	}
 }
 
-static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
+static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
 {
 	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1997,7 +2000,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
 		if (WARN_ON_ONCE(!event->attr.precise_ip))
 			continue;
 
-		__intel_pmu_pebs_event(event, iregs, base,
+		__intel_pmu_pebs_event(event, iregs, data, base,
 				       top, bit, counts[bit],
 				       setup_pebs_adaptive_sample_data);
 	}
...
@@ -475,7 +475,7 @@ enum perf_snb_uncore_imc_freerunning_types {
 static struct freerunning_counters snb_uncore_imc_freerunning[] = {
 	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
 							0x0, 0x0, 1, 32 },
-	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
+	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
 							0x0, 0x0, 1, 32 },
 	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
 							0x0, 0x0, 1, 32 },
...
@@ -585,6 +585,7 @@ union perf_capabilities {
 		u64	pebs_baseline:1;
 		u64	perf_metrics:1;
 		u64	pebs_output_pt_available:1;
+		u64	anythread_deprecated:1;
 	};
 	u64	capabilities;
 };
@@ -727,7 +728,7 @@ struct x86_pmu {
 	int		pebs_record_size;
 	int		pebs_buffer_size;
 	int		max_pebs_events;
-	void		(*drain_pebs)(struct pt_regs *regs);
+	void		(*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data);
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
 	unsigned long	large_pebs_flags;
...
@@ -137,7 +137,9 @@ union cpuid10_edx {
 	struct {
 		unsigned int num_counters_fixed:5;
 		unsigned int bit_width_fixed:8;
-		unsigned int reserved:19;
+		unsigned int reserved1:2;
+		unsigned int anythread_deprecated:1;
+		unsigned int reserved2:16;
 	} split;
 	unsigned int full;
 };
...
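
The new cpuid10_edx layout above corresponds to CPUID leaf 0xA, where EDX bit 15 advertises AnyThread deprecation once the architectural perfmon version in EAX[7:0] reaches 5. A userspace sketch of the same probe, using the compiler's <cpuid.h> helper rather than any kernel interface:

/*
 * Userspace sketch, not kernel code: probe the AnyThread-deprecation bit
 * the way the patched intel_pmu_init() does, assuming CPUID leaf 0xA with
 * EDX bit 15 and an architectural perfmon version (EAX[7:0]) of 5 or more.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                return 1;                        /* leaf 0xA not available */

        unsigned int version = eax & 0xff;
        int anythread_deprecated = (version >= 5) && ((edx >> 15) & 1);

        printf("perfmon version:      %u\n", version);
        printf("AnyThread deprecated: %s\n", anythread_deprecated ? "yes" : "no");
        return 0;
}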
@@ -101,8 +101,7 @@ u64 perf_reg_abi(struct task_struct *task)
 }
 
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
@@ -129,12 +128,20 @@ u64 perf_reg_abi(struct task_struct *task)
 	return PERF_SAMPLE_REGS_ABI_64;
 }
 
+static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);
+
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy)
+			struct pt_regs *regs)
 {
+	struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
 	struct pt_regs *user_regs = task_pt_regs(current);
 
+	if (!in_nmi()) {
+		regs_user->regs = user_regs;
+		regs_user->abi = perf_reg_abi(current);
+		return;
+	}
+
 	/*
 	 * If we're in an NMI that interrupted task_pt_regs setup, then
 	 * we can't sample user regs at all. This check isn't really
...
@@ -681,7 +681,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 
 		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
 		edx.split.bit_width_fixed = cap.bit_width_fixed;
-		edx.split.reserved = 0;
+		edx.split.anythread_deprecated = 1;
+		edx.split.reserved1 = 0;
+		edx.split.reserved2 = 0;
 
 		entry->eax = eax.full;
 		entry->ebx = cap.events_mask;
...
@@ -1022,13 +1022,7 @@ struct perf_sample_data {
 	struct perf_callchain_entry	*callchain;
 	u64				aux_size;
 
-	/*
-	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
-	 * on arch details.
-	 */
 	struct perf_regs		regs_user;
-	struct pt_regs			regs_user_copy;
-
 	struct perf_regs		regs_intr;
 	u64				stack_user_size;
 
@@ -1400,11 +1394,14 @@ perf_event_addr_filters(struct perf_event *event)
 extern void perf_event_addr_filters_sync(struct perf_event *event);
 
 extern int perf_output_begin(struct perf_output_handle *handle,
+			     struct perf_sample_data *data,
 			     struct perf_event *event, unsigned int size);
 extern int perf_output_begin_forward(struct perf_output_handle *handle,
+				     struct perf_sample_data *data,
 				     struct perf_event *event,
 				     unsigned int size);
 extern int perf_output_begin_backward(struct perf_output_handle *handle,
+				      struct perf_sample_data *data,
 				      struct perf_event *event,
 				      unsigned int size);
...
@@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy);
+			struct pt_regs *regs);
 #else
 
 #define PERF_REG_EXTENDED_MASK	0
@@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 }
 
 static inline void perf_get_regs_user(struct perf_regs *regs_user,
-				       struct pt_regs *regs,
-				       struct pt_regs *regs_user_copy)
+				       struct pt_regs *regs)
 {
 	regs_user->regs = task_pt_regs(current);
 	regs_user->abi = perf_reg_abi(current);
...
@@ -2312,9 +2312,6 @@ group_sched_out(struct perf_event *group_event,
 		event_sched_out(event, cpuctx, ctx);
 
 	perf_pmu_enable(ctx->pmu);
-
-	if (group_event->attr.exclusive)
-		cpuctx->exclusive = 0;
 }
 
 #define DETACH_GROUP	0x01UL
@@ -2583,11 +2580,8 @@ group_sched_in(struct perf_event *group_event,
 
 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 
-	if (event_sched_in(group_event, cpuctx, ctx)) {
-		pmu->cancel_txn(pmu);
-		perf_mux_hrtimer_restart(cpuctx);
-		return -EAGAIN;
-	}
+	if (event_sched_in(group_event, cpuctx, ctx))
+		goto error;
 
 	/*
 	 * Schedule in siblings as one group (if any):
@@ -2616,10 +2610,8 @@ group_sched_in(struct perf_event *group_event,
 	}
 	event_sched_out(group_event, cpuctx, ctx);
 
+error:
 	pmu->cancel_txn(pmu);
-
-	perf_mux_hrtimer_restart(cpuctx);
-
 	return -EAGAIN;
 }
@@ -2645,7 +2637,7 @@ static int group_can_go_on(struct perf_event *event,
 	 * If this group is exclusive and there are already
 	 * events on the CPU, it can't go on.
 	 */
-	if (event->attr.exclusive && cpuctx->active_oncpu)
+	if (event->attr.exclusive && !list_empty(get_event_list(event)))
 		return 0;
 	/*
 	 * Otherwise, try to add it if all previous groups were able
@@ -3679,6 +3671,7 @@ static int merge_sched_in(struct perf_event *event, void *data)
 
 			*can_add_hw = 0;
 			ctx->rotate_necessary = 1;
+			perf_mux_hrtimer_restart(cpuctx);
 		}
 	}
 
 	return 0;
@@ -6374,14 +6367,13 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-				  struct pt_regs *regs,
-				  struct pt_regs *regs_user_copy)
+				  struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
 		regs_user->abi = perf_reg_abi(current);
 		regs_user->regs = regs;
 	} else if (!(current->flags & PF_KTHREAD)) {
-		perf_get_regs_user(regs_user, regs, regs_user_copy);
+		perf_get_regs_user(regs_user, regs);
 	} else {
 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
 		regs_user->regs = NULL;
@@ -7083,8 +7075,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-		perf_sample_regs_user(&data->regs_user, regs,
-				      &data->regs_user_copy);
+		perf_sample_regs_user(&data->regs_user, regs);
 
 	if (sample_type & PERF_SAMPLE_REGS_USER) {
 		/* regs dump ABI info */
@@ -7186,6 +7177,7 @@ __perf_event_output(struct perf_event *event,
 		    struct perf_sample_data *data,
 		    struct pt_regs *regs,
 		    int (*output_begin)(struct perf_output_handle *,
+					struct perf_sample_data *,
 					struct perf_event *,
 					unsigned int))
 {
@@ -7198,7 +7190,7 @@ __perf_event_output(struct perf_event *event,
 
 	perf_prepare_sample(&header, data, event, regs);
 
-	err = output_begin(&handle, event, header.size);
+	err = output_begin(&handle, data, event, header.size);
 	if (err)
 		goto exit;
@@ -7264,7 +7256,7 @@ perf_event_read_event(struct perf_event *event,
 	int ret;
 
 	perf_event_header__init_id(&read_event.header, &sample, event);
-	ret = perf_output_begin(&handle, event, read_event.header.size);
+	ret = perf_output_begin(&handle, &sample, event, read_event.header.size);
 	if (ret)
 		return;
@@ -7533,7 +7525,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 	perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				task_event->event_id.header.size);
 	if (ret)
 		goto out;
@@ -7636,7 +7628,7 @@ static void perf_event_comm_output(struct perf_event *event,
 		return;
 
 	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				comm_event->event_id.header.size);
 
 	if (ret)
@@ -7736,7 +7728,7 @@ static void perf_event_namespaces_output(struct perf_event *event,
 
 	perf_event_header__init_id(&namespaces_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				namespaces_event->event_id.header.size);
 	if (ret)
 		goto out;
@@ -7863,7 +7855,7 @@ static void perf_event_cgroup_output(struct perf_event *event, void *data)
 
 	perf_event_header__init_id(&cgroup_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				cgroup_event->event_id.header.size);
 	if (ret)
 		goto out;
@@ -7989,7 +7981,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 	}
 
 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				mmap_event->event_id.header.size);
 	if (ret)
 		goto out;
@@ -8299,7 +8291,7 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
 	int ret;
 
 	perf_event_header__init_id(&rec.header, &sample, event);
-	ret = perf_output_begin(&handle, event, rec.header.size);
+	ret = perf_output_begin(&handle, &sample, event, rec.header.size);
 	if (ret)
 		return;
@@ -8333,7 +8325,7 @@ void perf_log_lost_samples(struct perf_event *event, u64 lost)
 
 	perf_event_header__init_id(&lost_samples_event.header, &sample, event);
 
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				lost_samples_event.header.size);
 	if (ret)
 		return;
@@ -8388,7 +8380,7 @@ static void perf_event_switch_output(struct perf_event *event, void *data)
 
 	perf_event_header__init_id(&se->event_id.header, &sample, event);
 
-	ret = perf_output_begin(&handle, event, se->event_id.header.size);
+	ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size);
 	if (ret)
 		return;
@@ -8463,7 +8455,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 
 	perf_event_header__init_id(&throttle_event.header, &sample, event);
 
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				throttle_event.header.size);
 	if (ret)
 		return;
@@ -8506,7 +8498,7 @@ static void perf_event_ksymbol_output(struct perf_event *event, void *data)
 
 	perf_event_header__init_id(&ksymbol_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, &sample, event,
 				ksymbol_event->event_id.header.size);
 	if (ret)
 		return;
@@ -8596,7 +8588,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
 
 	perf_event_header__init_id(&bpf_event->event_id.header,
 				   &sample, event);
-	ret = perf_output_begin(&handle, event,
+	ret = perf_output_begin(&handle, data, event,
 				bpf_event->event_id.header.size);
 	if (ret)
 		return;
@@ -8705,7 +8697,8 @@ static void perf_event_text_poke_output(struct perf_event *event, void *data)
 
 	perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event);
 
-	ret = perf_output_begin(&handle, event, text_poke_event->event_id.header.size);
+	ret = perf_output_begin(&handle, &sample, event,
+				text_poke_event->event_id.header.size);
 	if (ret)
 		return;
@@ -8786,7 +8779,7 @@ static void perf_log_itrace_start(struct perf_event *event)
 	rec.tid	= perf_event_tid(event, current);
 
 	perf_event_header__init_id(&rec.header, &sample, event);
-	ret = perf_output_begin(&handle, event, rec.header.size);
+	ret = perf_output_begin(&handle, &sample, event, rec.header.size);
 	if (ret)
 		return;
...
@@ -205,16 +205,12 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 
 static inline int get_recursion_context(int *recursion)
 {
-	int rctx;
+	unsigned int pc = preempt_count();
+	unsigned char rctx = 0;
 
-	if (unlikely(in_nmi()))
-		rctx = 3;
-	else if (in_irq())
-		rctx = 2;
-	else if (in_softirq())
-		rctx = 1;
-	else
-		rctx = 0;
+	rctx += !!(pc & (NMI_MASK));
+	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+	rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
 
 	if (recursion[rctx])
 		return -1;
...
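
The reworked get_recursion_context() above computes the recursion slot branch-free from preempt_count(), and keys the softirq level off SOFTIRQ_OFFSET (actually serving a softirq) rather than in_softirq(), which is also true when softirqs are merely disabled. A small userspace sketch of the same arithmetic, with mask values reproduced only for the demo and assumed to mirror include/linux/preempt.h:

/*
 * Userspace sketch, not kernel code: the branch-free recursion index.
 */
#include <stdio.h>

#define SOFTIRQ_OFFSET  0x00000100u     /* set while actually serving a softirq */
#define HARDIRQ_MASK    0x000f0000u     /* hardirq nesting count */
#define NMI_MASK        0x00f00000u     /* NMI nesting count */

static unsigned char recursion_index(unsigned int pc)
{
        unsigned char rctx = 0;

        rctx += !!(pc & (NMI_MASK));                                 /* NMI     -> +1 */
        rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK));                  /* hardirq -> +1 */
        rctx += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)); /* softirq -> +1 */

        return rctx;    /* task = 0, softirq = 1, hardirq = 2, NMI = 3 */
}

int main(void)
{
        printf("task:    %u\n", recursion_index(0));
        printf("softirq: %u\n", recursion_index(SOFTIRQ_OFFSET));
        printf("hardirq: %u\n", recursion_index(0x00010000u));
        printf("NMI:     %u\n", recursion_index(0x00100000u));
        return 0;
}

In an NMI all three predicates fire, in a hard interrupt the last two, in a softirq only the last, and in plain task context none, which yields the slots 3, 2, 1 and 0 without any branches.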
@@ -147,6 +147,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
 
 static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
+		    struct perf_sample_data *data,
 		    struct perf_event *event, unsigned int size,
 		    bool backward)
 {
@@ -237,18 +238,16 @@ __perf_output_begin(struct perf_output_handle *handle,
 	handle->size = (1UL << page_shift) - offset;
 
 	if (unlikely(have_lost)) {
-		struct perf_sample_data sample_data;
-
 		lost_event.header.size = sizeof(lost_event);
 		lost_event.header.type = PERF_RECORD_LOST;
 		lost_event.header.misc = 0;
 		lost_event.id          = event->id;
 		lost_event.lost        = local_xchg(&rb->lost, 0);
 
-		perf_event_header__init_id(&lost_event.header,
-					   &sample_data, event);
+		/* XXX mostly redundant; @data is already fully initializes */
+		perf_event_header__init_id(&lost_event.header, data, event);
 		perf_output_put(handle, lost_event);
-		perf_event__output_id_sample(event, handle, &sample_data);
+		perf_event__output_id_sample(event, handle, data);
 	}
 
 	return 0;
@@ -263,22 +262,25 @@ __perf_output_begin(struct perf_output_handle *handle,
 }
 
 int perf_output_begin_forward(struct perf_output_handle *handle,
+			      struct perf_sample_data *data,
 			      struct perf_event *event, unsigned int size)
 {
-	return __perf_output_begin(handle, event, size, false);
+	return __perf_output_begin(handle, data, event, size, false);
 }
 
 int perf_output_begin_backward(struct perf_output_handle *handle,
+			       struct perf_sample_data *data,
 			       struct perf_event *event, unsigned int size)
 {
-	return __perf_output_begin(handle, event, size, true);
+	return __perf_output_begin(handle, data, event, size, true);
 }
 
 int perf_output_begin(struct perf_output_handle *handle,
+		      struct perf_sample_data *data,
 		      struct perf_event *event, unsigned int size)
 {
-	return __perf_output_begin(handle, event, size,
+	return __perf_output_begin(handle, data, event, size,
 				   unlikely(is_write_backward(event)));
 }
...