Commit c7138f37 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf_counter: fix perf_poll()

Impact: fix kerneltop 100% CPU usage

Only return a poll event when there's actually been one, poll_wait()
doesn't actually wait for the waitq you pass it, it only enqueues
you on it.

Only once all FDs have been iterated and none of them returned a
poll-event will it schedule().

Also make it return POLL_HUP when there's no mmap() area to read from.

Further, fix a silly bug in the write code.
Reported-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Orig-LKML-Reference: <1237897096.24918.181.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f66c6b20
...@@ -246,6 +246,7 @@ struct file; ...@@ -246,6 +246,7 @@ struct file;
struct perf_mmap_data { struct perf_mmap_data {
struct rcu_head rcu_head; struct rcu_head rcu_head;
int nr_pages; int nr_pages;
atomic_t wakeup;
atomic_t head; atomic_t head;
struct perf_counter_mmap_page *user_page; struct perf_counter_mmap_page *user_page;
void *data_pages[0]; void *data_pages[0];
......
...@@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) ...@@ -1161,7 +1161,16 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
static unsigned int perf_poll(struct file *file, poll_table *wait) static unsigned int perf_poll(struct file *file, poll_table *wait)
{ {
struct perf_counter *counter = file->private_data; struct perf_counter *counter = file->private_data;
unsigned int events = POLLIN; struct perf_mmap_data *data;
unsigned int events;
rcu_read_lock();
data = rcu_dereference(counter->data);
if (data)
events = atomic_xchg(&data->wakeup, 0);
else
events = POLL_HUP;
rcu_read_unlock();
poll_wait(file, &counter->waitq, wait); poll_wait(file, &counter->waitq, wait);
...@@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi, ...@@ -1425,7 +1434,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
do { do {
offset = head = atomic_read(&data->head); offset = head = atomic_read(&data->head);
head += sizeof(u64); head += size;
} while (atomic_cmpxchg(&data->head, offset, head) != offset); } while (atomic_cmpxchg(&data->head, offset, head) != offset);
wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
...@@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi, ...@@ -1446,6 +1455,7 @@ static int perf_output_write(struct perf_counter *counter, int nmi,
* generate a poll() wakeup for every page boundary crossed * generate a poll() wakeup for every page boundary crossed
*/ */
if (wakeup) { if (wakeup) {
atomic_xchg(&data->wakeup, POLL_IN);
__perf_counter_update_userpage(counter, data); __perf_counter_update_userpage(counter, data);
if (nmi) { if (nmi) {
counter->wakeup_pending = 1; counter->wakeup_pending = 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment