Commit c78a9b9b authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'perf-urgent-for-linus' of...

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ftrace: Revert 8ab2b7ef ftrace: Remove unnecessary disabling of irqs
  kprobes/trace: Fix kprobe selftest for gcc 4.6
  ftrace: Fix possible undefined return code
  oprofile, dcookies: Fix possible circular locking dependency
  oprofile: Fix locking dependency in sync_start()
  oprofile: Free potentially owned tasks in case of errors
  oprofile, x86: Add comments to IBS LVT offset initialization
parents 842c895d 5f127133
...@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new) ...@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
/* /*
* If mask=1, the LVT entry does not generate interrupts while mask=0 * If mask=1, the LVT entry does not generate interrupts while mask=0
* enables the vector. See also the BKDGs. * enables the vector. See also the BKDGs. Must be called with
* preemption disabled.
*/ */
int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
......
...@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off) ...@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
return 0; return 0;
} }
/*
* This runs only on the current cpu. We try to find an LVT offset and
* setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
* the offset in the IBS_CTL per-node msr. The per-core APIC setup of
* the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
* amd_cpu_shutdown() using the new offset.
*/
static int force_ibs_eilvt_setup(void) static int force_ibs_eilvt_setup(void)
{ {
int offset; int offset;
int ret; int ret;
/*
* find the next free available EILVT entry, skip offset 0,
* pin search to this cpu
*/
preempt_disable(); preempt_disable();
/* find the next free available EILVT entry, skip offset 0 */
for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
if (get_eilvt(offset)) if (get_eilvt(offset))
break; break;
......
...@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = { ...@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify, .notifier_call = module_load_notify,
}; };
static void free_all_tasks(void)
{
	/*
	 * Drain the task "mortuary" so no task_struct references are
	 * leaked on the sync_start() error path or in sync_stop().
	 * NOTE(review): process_task_mortuary() is deliberately called
	 * twice — presumably it drains a two-stage (dying -> dead) list,
	 * so a single pass would only promote entries; confirm against
	 * the process_task_mortuary() implementation.
	 */
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}
int sync_start(void) int sync_start(void)
{ {
int err; int err;
...@@ -148,8 +155,6 @@ int sync_start(void) ...@@ -148,8 +155,6 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM; return -ENOMEM;
mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb); err = task_handoff_register(&task_free_nb);
if (err) if (err)
goto out1; goto out1;
...@@ -166,7 +171,6 @@ int sync_start(void) ...@@ -166,7 +171,6 @@ int sync_start(void)
start_cpu_work(); start_cpu_work();
out: out:
mutex_unlock(&buffer_mutex);
return err; return err;
out4: out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
...@@ -174,6 +178,7 @@ int sync_start(void) ...@@ -174,6 +178,7 @@ int sync_start(void)
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2: out2:
task_handoff_unregister(&task_free_nb); task_handoff_unregister(&task_free_nb);
free_all_tasks();
out1: out1:
free_cpumask_var(marked_cpus); free_cpumask_var(marked_cpus);
goto out; goto out;
...@@ -182,20 +187,16 @@ int sync_start(void) ...@@ -182,20 +187,16 @@ int sync_start(void)
void sync_stop(void) void sync_stop(void)
{ {
/* flush buffers */
mutex_lock(&buffer_mutex);
end_cpu_work(); end_cpu_work();
unregister_module_notifier(&module_load_nb); unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb); task_handoff_unregister(&task_free_nb);
mutex_unlock(&buffer_mutex); barrier(); /* do all of the above first */
flush_cpu_work();
/* make sure we don't leak task structs */ flush_cpu_work();
process_task_mortuary();
process_task_mortuary();
free_all_tasks();
free_cpumask_var(marked_cpus); free_cpumask_var(marked_cpus);
} }
......
...@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len) ...@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
/* FIXME: (deleted) ? */ /* FIXME: (deleted) ? */
path = d_path(&dcs->path, kbuf, PAGE_SIZE); path = d_path(&dcs->path, kbuf, PAGE_SIZE);
mutex_unlock(&dcookie_mutex);
if (IS_ERR(path)) { if (IS_ERR(path)) {
err = PTR_ERR(path); err = PTR_ERR(path);
goto out_free; goto out_free;
...@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len) ...@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
out_free: out_free:
kfree(kbuf); kfree(kbuf);
return err;
out: out:
mutex_unlock(&dcookie_mutex); mutex_unlock(&dcookie_mutex);
return err; return err;
......
...@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash, ...@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
{ {
char *func, *command, *next = buff; char *func, *command, *next = buff;
struct ftrace_func_command *p; struct ftrace_func_command *p;
int ret; int ret = -EINVAL;
func = strsep(&next, ":"); func = strsep(&next, ":");
...@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod, ...@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod,
{ {
unsigned long *p; unsigned long *p;
unsigned long addr; unsigned long addr;
unsigned long flags;
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
p = start; p = start;
...@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod, ...@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod,
ftrace_record_ip(addr); ftrace_record_ip(addr);
} }
/*
* Disable interrupts to prevent interrupts from executing
* code that is being modified.
*/
local_irq_save(flags);
ftrace_update_code(mod); ftrace_update_code(mod);
local_irq_restore(flags);
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
return 0; return 0;
......
...@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace); ...@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST #ifdef CONFIG_FTRACE_STARTUP_TEST
static int kprobe_trace_selftest_target(int a1, int a2, int a3, /*
int a4, int a5, int a6) * The "__used" keeps gcc from removing the function symbol
* from the kallsyms table.
*/
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
int a4, int a5, int a6)
{ {
return a1 + a2 + a3 + a4 + a5 + a6; return a1 + a2 + a3 + a4 + a5 + a6;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment