Commit a048d3af authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ftrace: fix modular build
  ftrace: disable tracing on acpi idle calls
  ftrace: remove latency-tracer leftover
  ftrace: only trace preempt off with preempt tracer
  ftrace: fix 4d3702b6 (post-v2.6.26): WARNING: at kernel/lockdep.c:2731 check_flags (ftrace)
parents fb3b8061 1fe37104
@@ -272,6 +272,8 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
+	/* Don't trace irqs off for idle */
+	stop_critical_timings();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +286,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	start_critical_timings();
 }

 #endif /* !CONFIG_CPU_IDLE */
@@ -1418,6 +1421,8 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+	/* Don't trace irqs off for idle */
+	stop_critical_timings();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1432,6 +1437,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	start_critical_timings();
 }

 /**
...
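The bracketing above is the "disable tracing on acpi idle calls" fix: C-state entry runs with interrupts disabled by design, so the irqsoff latency tracer would otherwise record every idle period as an enormous bogus interrupts-off latency. stop_critical_timings() suspends the latency measurement and start_critical_timings() resumes it. A minimal sketch of the same pattern in a hypothetical idle routine (the helper name is illustrative and not part of this commit; x86 safe_halt() is shown for concreteness, and the timing helpers were declared in linux/ftrace.h in this era):

static void my_idle_enter(void)
{
	/* Suspend irqs-off latency measurement for the idle window */
	stop_critical_timings();
	safe_halt();		/* sti; hlt: sleep until the next interrupt */
	/* Resume latency measurement now that idle is over */
	start_critical_timings();
}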
@@ -1203,9 +1203,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	iter->pos = *pos;

-	if (last_ent && !ent)
-		seq_puts(m, "\n\nvim:ft=help\n");
-
 	return ent;
 }
...
@@ -253,12 +253,14 @@ void start_critical_timings(void)
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
+EXPORT_SYMBOL_GPL(start_critical_timings);

 void stop_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
+EXPORT_SYMBOL_GPL(stop_critical_timings);

 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
@@ -337,11 +339,13 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	stop_critical_timing(a0, a1);
+	if (preempt_trace())
+		stop_critical_timing(a0, a1);
 }

 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	start_critical_timing(a0, a1);
+	if (preempt_trace())
+		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
...
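Two of the listed fixes land in this file. The EXPORT_SYMBOL_GPL() additions are the "fix modular build" change: the ACPI idle code above can be built as a module, and a module can only link against exported symbols. The if (preempt_trace()) guards are the "only trace preempt off with preempt tracer" change: previously, preempt-disable events were timed even when only the irqsoff tracer was selected. The predicates look roughly like this (a paraphrased sketch from memory of this file; the exact bodies may differ):

static inline int irq_trace(void)
{
	/* true only if the irqsoff tracer is selected and irqs are off */
	return ((trace_type & TRACER_IRQS_OFF) && irqs_disabled());
}

static inline int preempt_trace(void)
{
	/* true only if the preemptoff tracer is selected and
	   preemption is currently disabled */
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}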
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
 static int wakeup_cpu;
 static unsigned wakeup_prio = -1;

-static DEFINE_SPINLOCK(wakeup_lock);
+static raw_spinlock_t wakeup_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

 static void __wakeup_reset(struct trace_array *tr);
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(disabled != 1))
 		goto out;

-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);

 	if (unlikely(!wakeup_task))
 		goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	trace_function(tr, data, ip, parent_ip, flags);

 unlock:
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);

 out:
 	atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 	if (likely(disabled != 1))
 		goto out;

-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);

 	/* We could race with grabbing wakeup_lock */
 	if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 out_unlock:
 	__wakeup_reset(tr);
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);

 out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
 	struct trace_array_cpu *data;
 	int cpu;

-	assert_spin_locked(&wakeup_lock);
-
 	for_each_possible_cpu(cpu) {
 		data = tr->data[cpu];
 		tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);
 	__wakeup_reset(tr);
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);
 }

 static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 		goto out;

 	/* interrupts should be off from try_to_wake_up */
-	spin_lock(&wakeup_lock);
+	__raw_spin_lock(&wakeup_lock);

 	/* check for races. */
 	if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 				       CALLER_ADDR1, CALLER_ADDR2, flags);

 out_locked:
-	spin_unlock(&wakeup_lock);
+	__raw_spin_unlock(&wakeup_lock);
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
...
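This file carries the check_flags() fix from the series. wakeup_lock used to be an ordinary spinlock, but these hooks run deep inside the tracer, in contexts where lockdep is itself tracking irq-flag state; taking a lockdep-instrumented lock there recurses into lockdep with inconsistent flags and trips the WARNING at kernel/lockdep.c:2731. A raw spinlock is invisible to lockdep, at the cost of open-coding the irq save/restore around it. A minimal sketch of the resulting idiom (2.6.26-era raw spinlock API; the lock and function names here are illustrative, not from the commit):

static raw_spinlock_t my_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void tracer_private_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable irqs by hand */
	__raw_spin_lock(&my_lock);	/* raw lock: not tracked by lockdep */
	/* ... touch tracer-private state ... */
	__raw_spin_unlock(&my_lock);
	local_irq_restore(flags);
}

The same conversion explains the dropped assert_spin_locked(&wakeup_lock) in __wakeup_reset(): that helper only works on a regular spinlock_t, so it has no raw equivalent and is simply removed.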