Commit eff2108f authored by Ingo Molnar

Merge branch 'perf/urgent' into perf/core

Merge in the latest fixes, to avoid conflicts with ongoing work.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents afb71193 f1a52789
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };

 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
...
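The hunk above only moves one table entry, but its effect comes from how these tables are consumed: the active CPU model's extra_regs array is sentinel-terminated with EVENT_EXTRA_END and is, roughly, scanned at event setup for an entry matching the event code, so an entry missing from the SNB-EP table means that event gets no extra register programmed there. Below is a minimal sketch of that lookup pattern; the types and values (struct xreg, find_xreg, the MSR numbers) are simplified stand-ins, not the kernel's real definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified, illustrative stand-in for the kernel's extra-reg table entry. */
    struct xreg {
        unsigned int event;      /* event code this entry applies to */
        unsigned int msr;        /* extra MSR to program for that event */
        uint64_t     valid_mask; /* bits userspace may set in that MSR */
    };

    #define XREG_END { 0, 0, 0 }   /* sentinel, analogous to EVENT_EXTRA_END */

    static const struct xreg snbep_xregs[] = {
        { 0xb7, 0x1a6, 0x3fffff8fffull }, /* OFFCORE_RSP_0-style entry */
        { 0xbb, 0x1a7, 0x3fffff8fffull }, /* OFFCORE_RSP_1-style entry */
        { 0xcd, 0x3f6, 0xffffull },       /* PEBS load-latency style entry */
        XREG_END,
    };

    /* Walk the sentinel-terminated table for the entry matching an event code. */
    static const struct xreg *find_xreg(const struct xreg *table, unsigned int event)
    {
        for (; table->valid_mask; table++)
            if (table->event == event)
                return table;
        return NULL; /* event gets no extra register on this model */
    }

    int main(void)
    {
        const struct xreg *r = find_xreg(snbep_xregs, 0xcd);
        printf("found=%d\n", r != NULL); /* without the added entry this lookup fails */
        return 0;
    }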
@@ -390,8 +390,7 @@ struct perf_event {
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	int				mmap_locked;
-	struct user_struct		*mmap_user;
+
 	struct ring_buffer		*rb;
 	struct list_head		rb_entry;
...
@@ -31,6 +31,10 @@ struct ring_buffer {
 	spinlock_t			event_lock;
 	struct list_head		event_list;

+	atomic_t			mmap_count;
+	unsigned long			mmap_locked;
+	struct user_struct		*mmap_user;
+
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
 };
...
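Taken together, the two struct hunks above move the mmap accounting state off struct perf_event and onto the shared struct ring_buffer (a buffer-level mmap_count plus mmap_locked and mmap_user), so the charge for locked pages lives with the object whose pages are actually mapped rather than with each event that maps it. A rough sketch of that ownership pattern, using invented names (shared_buf, event_handle, buf_mmap/buf_munmap) rather than the real kernel API:

    #include <stdio.h>

    /* Hypothetical illustration: accounting fields live on the shared buffer,
     * not on each event that maps it. */
    struct shared_buf {
        int           mmap_count;   /* how many mappings reference the buffer */
        unsigned long mmap_locked;  /* pages charged while mapped */
        const char   *mmap_user;    /* stand-in for struct user_struct * */
    };

    struct event_handle {
        struct shared_buf *rb;      /* events just point at the buffer */
    };

    static void buf_mmap(struct shared_buf *b, unsigned long pages, const char *user)
    {
        if (b->mmap_count++ == 0) {      /* first mapping charges the user */
            b->mmap_locked = pages;
            b->mmap_user = user;
        }
    }

    static void buf_munmap(struct shared_buf *b)
    {
        if (--b->mmap_count == 0) {      /* last unmap undoes the charge */
            printf("uncharging %lu pages from %s\n", b->mmap_locked, b->mmap_user);
            b->mmap_locked = 0;
            b->mmap_user = NULL;
        }
    }

    int main(void)
    {
        struct shared_buf buf = { 0 };
        struct event_handle e1 = { &buf }, e2 = { &buf };

        buf_mmap(e1.rb, 16, "user0");
        buf_mmap(e2.rb, 16, "user0");    /* second event maps the same buffer */
        buf_munmap(e2.rb);
        buf_munmap(e1.rb);               /* charge is dropped exactly once */
        return 0;
    }

With the accounting on the buffer, two events sharing one ring buffer cannot double-charge or double-uncharge the owning user, which is the kind of inconsistency the per-event fields allowed.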
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);

 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }

 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;

-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		BUG_ON(!kprobe_unused(&op->kp));
 		list_del_init(&op->list);
 		free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-	LIST_HEAD(free_list);
-
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 	 * kprobes before waiting for quiesence period.
 	 */
-	do_unoptimize_kprobes(&free_list);
+	do_unoptimize_kprobes();

 	/*
 	 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	do_optimize_kprobes();

 	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes(&free_list);
+	do_free_cleaned_kprobes();

 	mutex_unlock(&module_mutex);
 	mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 	if (!list_empty(&op->list))
 		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+	if (kprobe_unused(p)) {
+		/* Enqueue if it is unused */
+		list_add(&op->list, &freeing_list);
+		/*
+		 * Remove unused probes from the hash list. After waiting
+		 * for synchronization, this probe is reclaimed.
+		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 */
+		hlist_del_rcu(&op->kp.hlist);
+	}
+
 	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
...
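The kprobes hunks above all serve one change: the caller-supplied free_list that used to be threaded through do_unoptimize_kprobes() and do_free_cleaned_kprobes() is replaced by a single static freeing_list (protected, per the existing comment, by kprobe_mutex), so other paths such as kill_optimized_kprobe() can also queue unused probes for the optimizer to reclaim later. Below is a small, self-contained sketch of that deferred-free pattern with a static, lock-protected list; the names (queue_for_freeing, free_cleaned_nodes) and the pthread mutex are illustrative, not the kernel's implementation.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical sketch: producers enqueue nodes on one static, lock-protected
     * freeing list, and a single worker drains and frees them later. */
    struct node {
        struct node *next;
        int          id;
    };

    static struct node     *freeing_list;                        /* static queue */
    static pthread_mutex_t  list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Any code path (e.g. a "kill" path) can queue a node for deferred freeing. */
    static void queue_for_freeing(struct node *n)
    {
        pthread_mutex_lock(&list_lock);
        n->next = freeing_list;
        freeing_list = n;
        pthread_mutex_unlock(&list_lock);
    }

    /* The worker reclaims everything queued so far, in one place. */
    static void free_cleaned_nodes(void)
    {
        pthread_mutex_lock(&list_lock);
        struct node *n = freeing_list;
        freeing_list = NULL;
        pthread_mutex_unlock(&list_lock);

        while (n) {
            struct node *next = n->next;
            printf("reclaiming node %d\n", n->id);
            free(n);
            n = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            queue_for_freeing(n);
        }
        free_cleaned_nodes();
        return 0;
    }

The point of making the list static is that enqueueing no longer requires owning the list that the reclaim function will drain, which is exactly what lets the kill path hand unused probes over to the optimizer instead of leaking them.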