Commit eb487ab4 authored by Linus Torvalds

Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix reading in perf_event_read()
  watchdog: Don't change watchdog state on read of sysctl
  watchdog: Fix sysctl consistency
  watchdog: Fix broken nowatchdog logic
  perf: Fix Pentium4 raw event validation
  perf: Fix alloc_callchain_buffers()
parents 0b0abeaf 542e72fc
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
          * if an event is shared accross the logical threads
          * the user needs special permissions to be able to use it
          */
-        if (p4_event_bind_map[v].shared) {
+        if (p4_ht_active() && p4_event_bind_map[v].shared) {
                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                         return -EACCES;
         }
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
                 event->hw.config = p4_set_ht_bit(event->hw.config);
 
         if (event->attr.type == PERF_TYPE_RAW) {
-
+                struct p4_event_bind *bind;
+                unsigned int esel;
                 /*
                  * Clear bits we reserve to be managed by kernel itself
                  * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
                  * bits since we keep additional info here (for cache events and etc)
                  */
                 event->hw.config |= event->attr.config;
+                bind = p4_config_get_bind(event->attr.config);
+                if (!bind) {
+                        rc = -EINVAL;
+                        goto out;
+                }
+                esel = P4_OPCODE_ESEL(bind->opcode);
+                event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
         }
 
         rc = x86_setup_perfctr(event);
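The raw-event fix above fills in the CCCR's ESCR-select (ESEL) field from the event's template opcode, so a raw event's CCCR actually selects the ESCR that the event programs. A minimal standalone sketch of that packing idea follows; the field layout, masks, and function names below are simplified inventions for illustration, not the kernel's exact P4 encoding.

/*
 * Illustrative sketch of "re-derive ESEL from the event opcode and force
 * it into the config".  Simplified field layout, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define OPCODE_ESEL_MASK   0x00ffu                    /* low byte of the opcode: ESCR select */
#define CCCR_ESEL_SHIFT    13                         /* where ESEL lives in the CCCR (example) */
#define CCCR_ESEL_MASK     (0x7u << CCCR_ESEL_SHIFT)

static uint64_t fix_cccr_esel(uint64_t config, uint16_t opcode)
{
	uint64_t esel = opcode & OPCODE_ESEL_MASK;    /* trusted value from the event template */

	config &= ~(uint64_t)CCCR_ESEL_MASK;          /* drop whatever user space packed in */
	config |= esel << CCCR_ESEL_SHIFT;            /* force the ESEL that matches the event */
	return config;
}

int main(void)
{
	uint64_t user_config = 0xdeadbeefULL;         /* raw config as supplied by user space */
	uint16_t opcode = 0x0203;                     /* template: event 0x02, ESEL 0x03 */

	printf("fixed config: %#llx\n",
	       (unsigned long long)fix_cccr_esel(user_config, opcode));
	return 0;
}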
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
                 return;
 
         raw_spin_lock(&ctx->lock);
-        update_context_time(ctx);
+        if (ctx->is_active)
+                update_context_time(ctx);
         update_event_times(event);
-        raw_spin_unlock(&ctx->lock);
-
-        event->pmu->read(event);
+        if (event->state == PERF_EVENT_STATE_ACTIVE)
+                event->pmu->read(event);
+        raw_spin_unlock(&ctx->lock);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
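The change above updates the context time only while the context is active and calls pmu->read() only for an ACTIVE event, and it does both under ctx->lock rather than after dropping it. As a loose userspace analogy of that pattern (invented names and types, not kernel code):

/*
 * Loose analogy: sample a counter only while it is marked active, and do
 * the state check and the sampling under the same lock.
 */
#include <pthread.h>
#include <stdio.h>

enum state { STATE_INACTIVE, STATE_ACTIVE };

struct event {
	pthread_mutex_t lock;
	enum state      state;
	unsigned long long count;
};

static unsigned long long event_read(struct event *e)
{
	unsigned long long val;

	pthread_mutex_lock(&e->lock);
	if (e->state == STATE_ACTIVE)   /* only touch the "hardware" while scheduled in */
		e->count++;             /* stand-in for the pmu->read() update */
	val = e->count;
	pthread_mutex_unlock(&e->lock);
	return val;
}

int main(void)
{
	struct event e = { PTHREAD_MUTEX_INITIALIZER, STATE_ACTIVE, 0 };

	printf("count: %llu\n", event_read(&e));
	return 0;
}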
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
          * accessed from NMI. Use a temporary manual per cpu allocation
          * until that gets sorted out.
          */
-        size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-                num_possible_cpus();
+        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
         entries = kzalloc(size, GFP_KERNEL);
         if (!entries)
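The sizing fix above allocates room for nr_cpu_ids slots (the array is indexed by CPU number, which can be sparse) instead of num_possible_cpus(), and expresses the size with offsetof() over the flexible array. A standalone sketch of that offsetof() sizing idiom; the struct and the NR_CPUS constant below are invented stand-ins, not the kernel's callchain_cpus_entries:

/*
 * Size a "header + flexible array" allocation with offsetof(): the result
 * covers the header fields, NR_CPUS array slots, and any padding between
 * them, without hand-written sizeof arithmetic.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8   /* stand-in for nr_cpu_ids; the kernel passes a runtime value here */

struct cpus_entries {
	int   header;               /* stands in for the real header fields */
	void *cpu_entries[];        /* flexible array: one slot per CPU id */
};

int main(void)
{
	size_t size = offsetof(struct cpus_entries, cpu_entries[NR_CPUS]);
	struct cpus_entries *entries = calloc(1, size);

	if (!entries)
		return 1;
	printf("allocated %zu bytes for %d cpu slots\n", size, NR_CPUS);
	free(entries);
	return 0;
}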
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
         if (!strncmp(str, "panic", 5))
                 hardlockup_panic = 1;
         else if (!strncmp(str, "0", 1))
-                no_watchdog = 1;
+                watchdog_enabled = 0;
         return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-        no_watchdog = 1;
+        watchdog_enabled = 0;
         return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-        no_watchdog = 1;
+        watchdog_enabled = 0;
         return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
                 wake_up_process(p);
         }
 
-        /* if any cpu succeeds, watchdog is considered enabled for the system */
-        watchdog_enabled = 1;
-
         return 0;
 }
 
@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
         int cpu;
-        int result = 0;
 
+        watchdog_enabled = 0;
+
         for_each_online_cpu(cpu)
-                result += watchdog_enable(cpu);
+                if (!watchdog_enable(cpu))
+                        /* if any cpu succeeds, watchdog is considered
+                           enabled for the system */
+                        watchdog_enabled = 1;
 
-        if (result)
+        if (!watchdog_enabled)
                 printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
 {
         int cpu;
 
-        if (no_watchdog)
-                return;
-
         for_each_online_cpu(cpu)
                 watchdog_disable(cpu);
 
@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
 {
         proc_dointvec(table, write, buffer, length, ppos);
 
-        if (watchdog_enabled)
-                watchdog_enable_all_cpus();
-        else
-                watchdog_disable_all_cpus();
+        if (write) {
+                if (watchdog_enabled)
+                        watchdog_enable_all_cpus();
+                else
+                        watchdog_disable_all_cpus();
+        }
 
         return 0;
 }
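The handler change above makes the sysctl act only when it is invoked for a write; a read must report the current value without re-running the enable/disable paths. A small userspace sketch of that "only act on writes" pattern, with invented names (this is not the kernel's proc_dointvec interface):

/* handler(write=0) models a read of the sysctl, handler(write=1) a write. */
#include <stdio.h>

static int watchdog_enabled = 1;

static void enable_all(void)  { puts("enable watchdogs");  }
static void disable_all(void) { puts("disable watchdogs"); }

static int handler(int write, int new_value)
{
	if (write) {
		watchdog_enabled = new_value;
		if (watchdog_enabled)
			enable_all();
		else
			disable_all();
	}
	/* a read only reports the current value and must not toggle state */
	return watchdog_enabled;
}

int main(void)
{
	handler(1, 0);                          /* write: turns the watchdog off */
	printf("read -> %d\n", handler(0, 1));  /* read: value argument is ignored */
	return 0;
}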
@@ -530,6 +527,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
-                err = watchdog_enable(hotcpu);
+                if (watchdog_enabled)
+                        err = watchdog_enable(hotcpu);
                 break;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
         void *cpu = (void *)(long)smp_processor_id();
         int err;
 
-        if (no_watchdog)
-                return;
-
         err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
         WARN_ON(notifier_to_errno(err));
 