Commit ee67e6cb authored by Linus Torvalds


Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  oprofile: warn on freeing event buffer too early
  oprofile: fix race condition in event_buffer free
  lockdep: Use cpu_clock() for lockstat
parents 220a6258 c7cedb12
drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
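A minimal sketch of the calling convention the new comment documents, assuming a hypothetical producer (emit_sample() is illustrative and not part of the patch): callers serialize on buffer_mutex, so the NULL check above should only fire if a stray handler runs after teardown.

	/* Hypothetical producer honouring the documented locking rule. */
	static void emit_sample(unsigned long val)
	{
		mutex_lock(&buffer_mutex);	/* buffer_mutex protects event_buffer */
		add_event_entry(val);		/* cannot race with free_event_buffer() */
		mutex_unlock(&buffer_mutex);
	}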
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
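The mutex taken in free_event_buffer() is what closes the race: a reader that reached event_buffer_read() holds buffer_mutex while it copies data, so the buffer can no longer be freed underneath it, and once it is freed the pointer is cleared under the same lock. A condensed sketch of the pattern with simplified names (buf and buf_mutex are illustrative stand-ins, not the driver's identifiers):

	static DEFINE_MUTEX(buf_mutex);		/* stand-in for buffer_mutex */
	static unsigned long *buf;		/* stand-in for event_buffer */

	static ssize_t reader_path(unsigned long *out)
	{
		ssize_t ret = -EINTR;

		mutex_lock(&buf_mutex);
		if (buf) {			/* freed while we slept? then bail out */
			*out = buf[0];
			ret = 0;
		}
		mutex_unlock(&buf_mutex);
		return ret;
	}

	static void teardown_path(void)
	{
		mutex_lock(&buf_mutex);		/* waits for any in-flight reader */
		vfree(buf);
		buf = NULL;			/* later readers see NULL and return */
		mutex_unlock(&buf_mutex);
	}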
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
 	mutex_lock(&buffer_mutex);
 
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
+
 	atomic_set(&buffer_ready, 0);
 
 	retval = -EFAULT;
......
kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
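The new helper reads the local CPU's clock through cpu_clock(), which, unlike a raw sched_clock() call, is kept within a bounded offset across CPUs; presumably this is why a lock acquired on one CPU and released on another can no longer produce a negative interval, and why the lockstat arithmetic below switches from s64 to u64. A small illustrative use of the helper (start and delta are made-up variable names):

	u64 start, delta;

	start = lockstat_clock();		/* nanosecond timestamp, local CPU */
	/* ... lock is contended or held here ... */
	delta = lockstat_clock() - start;	/* interval as an unsigned 64-bit value */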
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 		if (hlock->instance != lock)
 			return;
 
-		hlock->waittime_stamp = sched_clock();
+		hlock->waittime_stamp = lockstat_clock();
 
 		contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 		contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	cpu = smp_processor_id();
 
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}
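Pulling the timing hunks together, the wait/hold bookkeeping now reads roughly as follows (condensed, illustrative sketch rather than a verbatim excerpt):

	/* __lock_contended(): the task starts waiting for the lock. */
	hlock->waittime_stamp = lockstat_clock();

	/* __lock_acquired(): the lock is finally obtained. */
	if (hlock->waittime_stamp) {
		now = lockstat_clock();
		waittime = now - hlock->waittime_stamp;	/* time spent waiting */
		hlock->holdtime_stamp = now;		/* hold time starts here */
	}

	/* lock_release_holdtime(): holdtime = lockstat_clock() - hlock->holdtime_stamp. */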
......