Commit 6ec22f9b authored by Linus Torvalds

Merge branch 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-debug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Limit number of per cpu TSC sync messages
  x86: dumpstack, 64-bit: Disable preemption when walking the IRQ/exception stacks
  x86: dumpstack: Clean up the x86_stack_ids[][] initalization and other details
  x86, cpu: mv display_cacheinfo -> cpu_detect_cache_sizes
  x86: Suppress stack overrun message for init_task
  x86: Fix cpu_devs[] initialization in early_cpu_init()
  x86: Remove CPU cache size output for non-Intel too
  x86: Minimise printk spew from per-vendor init code
  x86: Remove the CPU cache size printk's
  cpumask: Avoid cpumask_t in arch/x86/kernel/apic/nmi.c
  x86: Make sure we also print a Code: line for show_regs()
parents 83be7d76 9b3660a5
arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,8 @@
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;

-static cpumask_t backtrace_mask __read_mostly;
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

 /* nmi_active:
  * >0: the lapic NMI watchdog is active, but can be disabled
@@ -414,7 +415,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
        }

        /* We can be called before check_nmi_watchdog, hence NULL check. */
-       if (cpumask_test_cpu(cpu, &backtrace_mask)) {
+       if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */

                spin_lock(&lock);
@@ -422,7 +423,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                show_regs(regs);
                dump_stack();
                spin_unlock(&lock);
-               cpumask_clear_cpu(cpu, &backtrace_mask);
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                rc = 1;
        }
@@ -558,14 +559,14 @@ void arch_trigger_all_cpu_backtrace(void)
 {
        int i;

-       cpumask_copy(&backtrace_mask, cpu_online_mask);
+       cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
-               if (cpumask_empty(&backtrace_mask))
+               if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }
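The hunks above follow a common kernel pattern: a raw bitmap sized for NR_CPUS is declared at build time (no runtime allocation, which matters in NMI context), and to_cpumask() wraps it wherever a struct cpumask * is expected. A minimal sketch of that pattern; the identifiers demo_mask and demo_mark_cpu are illustrative, not from the tree:

#include <linux/bitmap.h>
#include <linux/cpumask.h>

/* Always NR_CPUS bits -- hence "prepared to waste bits here". */
static DECLARE_BITMAP(demo_mask, NR_CPUS) __read_mostly;

static void demo_mark_cpu(int cpu)
{
        /* to_cpumask() lets the cpumask_* API operate on a bare bitmap. */
        cpumask_set_cpu(cpu, to_cpumask(demo_mask));
}

static int demo_cpu_is_marked(int cpu)
{
        return cpumask_test_cpu(cpu, to_cpumask(demo_mask));
}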
arch/x86/kernel/cpu/amd.c
@@ -535,7 +535,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                }
        }

-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008) {
arch/x86/kernel/cpu/centaur.c
@@ -294,7 +294,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }

-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 }

 enum {
arch/x86/kernel/cpu/common.c
@@ -61,7 +61,7 @@ void __init setup_cpu_local_masks(void)
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);
 #else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
@@ -383,7 +383,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
        }
 }

-void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
        unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -391,8 +391,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-               printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-                               edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
 #ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
@@ -422,9 +420,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 #endif

        c->x86_cache_size = l2size;
-
-       printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-                       l2size, ecx & 0xFF);
 }

 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
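With the printk()s gone, the renamed helper only detects and records sizes, which the new name states plainly. A condensed sketch of what remains, assuming the usual AMD-defined CPUID leaves; the real function additionally handles the K6 quirk and subtracts the exclusive L1 on K8, which this skeleton omits:

void sketch_cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                /* ECX[31:24]: L1 D size in KB, EDX[31:24]: L1 I size in KB */
                c->x86_cache_size = (ecx >> 24) + (edx >> 24);
        }

        if (n < 0x80000006)             /* some chips just have a large L1 */
                return;

        cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
        l2size = ecx >> 16;             /* ECX[31:16]: L2 size in KB */
        if (!l2size)
                return;

        c->x86_cache_size = l2size;     /* recorded, no longer printed */
}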
@@ -659,24 +654,31 @@ void __init early_cpu_init(void)
        const struct cpu_dev *const *cdev;
        int count = 0;

+#ifdef PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
+#endif
+
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;
-               unsigned int j;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

-               for (j = 0; j < 2; j++) {
-                       if (!cpudev->c_ident[j])
-                               continue;
-                       printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
-                               cpudev->c_ident[j]);
-               }
+#ifdef PROCESSOR_SELECT
+               {
+                       unsigned int j;
+
+                       for (j = 0; j < 2; j++) {
+                               if (!cpudev->c_ident[j])
+                                       continue;
+                               printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
+                                       cpudev->c_ident[j]);
+                       }
+               }
+#endif
        }

        early_identify_cpu(&boot_cpu_data);
 }
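The restructuring matters because cpu_devs[] feeds the vendor lookup in get_cpu_vendor(); it has to be filled whether or not PROCESSOR_SELECT is defined, with only the verbose listing compiled out. Reduced to its bones (an illustrative condensation, not the literal code):

for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
        if (count >= X86_VENDOR_NUM)
                break;
        cpu_devs[count++] = *cdev;      /* registration: unconditional */
#ifdef PROCESSOR_SELECT
        /* only the "KERNEL supported cpus" listing lives here */
#endif
}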
arch/x86/kernel/cpu/cpu.h
@@ -32,6 +32,6 @@ struct cpu_dev {
 extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];

-extern void display_cacheinfo(struct cpuinfo_x86 *c);
+extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);

 #endif
arch/x86/kernel/cpu/cyrix.c
@@ -373,7 +373,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
        /* Handle the GX (Formally known as the GX2) */

        if (c->x86 == 5 && c->x86_model == 5)
-               display_cacheinfo(c);
+               cpu_detect_cache_sizes(c);
        else
                init_cyrix(c);
 }
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -488,22 +488,6 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }

-       if (trace)
-               printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
-       else if (l1i)
-               printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
-       if (l1d)
-               printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
-       else
-               printk(KERN_CONT "\n");
-
-       if (l2)
-               printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-
-       if (l3)
-               printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
arch/x86/kernel/cpu/transmeta.c
@@ -26,7 +26,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
        early_init_transmeta(c);

-       display_cacheinfo(c);
+       cpu_detect_cache_sizes(c);

        /* Print CMS and CPU revision */
        max = cpuid_eax(0x80860000);
arch/x86/kernel/dumpstack_32.c
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/kexec.h>
+#include <linux/sysfs.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
-#include <linux/sysfs.h>

 #include <asm/stacktrace.h>
@@ -35,6 +35,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        if (!stack) {
                unsigned long dummy;
+
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
@@ -57,8 +58,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,

                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-               bp = print_context_stack(context, stack, bp, ops,
-                                        data, NULL, &graph);
+               bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);

                stack = (unsigned long *)context->previous_esp;
                if (!stack)
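For orientation, that call sits in the 32-bit stack walk, which hops from one stack to the one it interrupted via thread_info->previous_esp. The surrounding loop, roughly (a sketch from context, not the verbatim function body):

for (;;) {
        struct thread_info *context;

        /* thread_info sits at the base of the THREAD_SIZE-aligned stack */
        context = (struct thread_info *)
                ((unsigned long)stack & (~(THREAD_SIZE - 1)));
        bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);

        /* follow the chain, e.g. hardirq -> softirq -> process stack */
        stack = (unsigned long *)context->previous_esp;
        if (!stack)
                break;
        if (ops->stack(data, "IRQ") < 0)
                break;
        touch_nmi_watchdog();
}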
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(dump_trace);

 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *sp, unsigned long bp, char *log_lvl)
+                  unsigned long *sp, unsigned long bp, char *log_lvl)
 {
        unsigned long *stack;
        int i;
@@ -156,4 +156,3 @@ int is_valid_bugaddr(unsigned long ip)

        return ud2 == 0x0b0f;
 }
-
arch/x86/kernel/dumpstack_64.c
@@ -10,26 +10,28 @@
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/kexec.h>
+#include <linux/sysfs.h>
 #include <linux/bug.h>
 #include <linux/nmi.h>
-#include <linux/sysfs.h>

 #include <asm/stacktrace.h>

 #include "dumpstack.h"

+#define N_EXCEPTION_STACKS_END \
+               (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
+
 static char x86_stack_ids[][8] = {
-               [DEBUG_STACK - 1] = "#DB",
-               [NMI_STACK - 1] = "NMI",
-               [DOUBLEFAULT_STACK - 1] = "#DF",
-               [STACKFAULT_STACK - 1] = "#SS",
-               [MCE_STACK - 1] = "#MC",
+               [ DEBUG_STACK-1                 ]       = "#DB",
+               [ NMI_STACK-1                   ]       = "NMI",
+               [ DOUBLEFAULT_STACK-1           ]       = "#DF",
+               [ STACKFAULT_STACK-1            ]       = "#SS",
+               [ MCE_STACK-1                   ]       = "#MC",
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
-               [N_EXCEPTION_STACKS ...
-                       N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
+               [ N_EXCEPTION_STACKS ...
+                 N_EXCEPTION_STACKS_END        ]       = "#DB[?]"
 #endif
-       };
+};

 int x86_is_stack_id(int id, char *name)
 {
@@ -37,7 +39,7 @@ int x86_is_stack_id(int id, char *name)
 }

 static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
-                                       unsigned *usedp, char **idp)
+                                        unsigned *usedp, char **idp)
 {
        unsigned k;
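The x86_stack_ids table above leans on GNU C designated initializers, including the range form ([A ... B]) for the #DB[?] slots; the new N_EXCEPTION_STACKS_END macro just names the range's upper bound. A stand-alone illustration (demo_names is made up):

static const char *demo_names[8] = {
        [0]             = "first",
        [2 ... 5]       = "middle",     /* GNU range designator */
        [7]             = "last",       /* unmentioned slots stay NULL */
};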
@@ -202,21 +204,24 @@ EXPORT_SYMBOL(dump_trace);

 void
 show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
-               unsigned long *sp, unsigned long bp, char *log_lvl)
+                  unsigned long *sp, unsigned long bp, char *log_lvl)
 {
+       unsigned long *irq_stack_end;
+       unsigned long *irq_stack;
        unsigned long *stack;
+       int cpu;
        int i;
-       const int cpu = smp_processor_id();
-       unsigned long *irq_stack_end =
-               (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-       unsigned long *irq_stack =
-               (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
+
+       preempt_disable();
+       cpu = smp_processor_id();
+
+       irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+       irq_stack     = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

        /*
-        * debugging aid: "show_stack(NULL, NULL);" prints the
-        * back trace for this cpu.
+        * Debugging aid: "show_stack(NULL, NULL);" prints the
+        * back trace for this cpu:
         */
-
        if (sp == NULL) {
                if (task)
                        sp = (unsigned long *)task->thread.sp;
@@ -240,6 +245,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                printk(" %016lx", *stack++);
                touch_nmi_watchdog();
        }
+       preempt_enable();
+
        printk("\n");
        show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
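The preempt_disable()/preempt_enable() pair is the standard recipe whenever smp_processor_id() is used to reach per-CPU data: if the task migrated mid-walk, irq_stack_end would describe another CPU's IRQ stack. A minimal sketch of the rule; walk_demo is illustrative:

static void walk_demo(void)
{
        unsigned long *irq_stack_end;
        int cpu;

        preempt_disable();              /* no migration from here on */
        cpu = smp_processor_id();       /* now stable */
        irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));

        /* ... this CPU's IRQ stack can be examined safely here ... */

        preempt_enable();
}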
@@ -303,4 +310,3 @@ int is_valid_bugaddr(unsigned long ip)

        return ud2 == 0x0b0f;
 }
-
arch/x86/kernel/process_32.c
@@ -188,7 +188,7 @@ void __show_regs(struct pt_regs *regs, int all)

 void show_regs(struct pt_regs *regs)
 {
-       __show_regs(regs, 1);
+       show_registers(regs);
        show_trace(NULL, regs, &regs->sp, regs->bp);
 }
arch/x86/kernel/process_64.c
@@ -227,8 +227,7 @@ void __show_regs(struct pt_regs *regs, int all)

 void show_regs(struct pt_regs *regs)
 {
-       printk(KERN_INFO "CPU %d:", smp_processor_id());
-       __show_regs(regs, 1);
+       show_registers(regs);
        show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
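show_registers() (defined in dumpstack_32.c/dumpstack_64.c) prints the register dump plus the stack, the call trace and, the point of this patch, a "Code:" line with the instruction bytes around regs->ip. A loose sketch of how such a dump works; demo_print_code is illustrative and omits the address-range checks the real code performs:

static void demo_print_code(struct pt_regs *regs)
{
        u8 *ip = (u8 *)regs->ip - 43;   /* start before the faulting insn */
        unsigned char c;
        int i;

        printk(KERN_DEBUG "Code: ");
        for (i = 0; i < 64; i++, ip++) {
                if (probe_kernel_address(ip, c)) {
                        printk(KERN_CONT " Bad RIP value.");
                        break;
                }
                printk(KERN_CONT "%02x ", c);
        }
        printk(KERN_CONT "\n");
}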
arch/x86/kernel/tsc_sync.c
@@ -114,13 +114,12 @@ void __cpuinit check_tsc_sync_source(int cpu)
                return;

        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
-               printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
+               if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
+                       pr_info(
+                       "Skipped synchronization checks as TSC is reliable.\n");
                return;
        }

-       pr_info("checking TSC synchronization [CPU#%d -> CPU#%d]:",
-               smp_processor_id(), cpu);
-
        /*
         * Reset it - in case this is a second bootup:
         */
@@ -142,12 +141,14 @@ void __cpuinit check_tsc_sync_source(int cpu)
                cpu_relax();

        if (nr_warps) {
-               printk("\n");
+               pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+                          smp_processor_id(), cpu);
                pr_warning("Measured %Ld cycles TSC warp between CPUs, "
                           "turning off TSC clock.\n", max_warp);
                mark_tsc_unstable("check_tsc_sync_source failed");
        } else {
-               printk(" passed.\n");
+               pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+                        smp_processor_id(), cpu);
        }

        /*
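Before this change every brought-up CPU contributed a "checking TSC synchronization ... passed" line, which on multi-thousand-CPU systems swamps the boot log; success is now pr_debug() and only measured warps warrant pr_warning(). The TSC_RELIABLE message also stops using printk_once(), which fires on the first call per callsite, in favor of printing near the end of boot or on later hotplug. printk_once() is roughly this macro (a simplified sketch of the kernel.h definition of that era):

#define demo_printk_once(fmt...)                        \
({                                                      \
        static bool __done;                             \
                                                        \
        if (!__done) {                                  \
                __done = true;  /* one-shot, per callsite */ \
                printk(fmt);                            \
        }                                               \
})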
arch/x86/mm/fault.c
@@ -659,7 +659,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
        show_fault_oops(regs, error_code, address);

        stackend = end_of_stack(tsk);
-       if (*stackend != STACK_END_MAGIC)
+       if (tsk != &init_task && *stackend != STACK_END_MAGIC)
                printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

        tsk->thread.cr2 = address;
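end_of_stack() points at the lowest usable word of a task's stack, which dup_task_struct() seeds with STACK_END_MAGIC (0x57AC6E9D) at fork time; finding anything else there during an oops suggests the stack was overrun. init_task never passes through that path, so its magic may legitimately be absent, hence the exemption. An illustrative helper capturing the check (demo_stack_overran is not in the tree):

#include <linux/magic.h>
#include <linux/sched.h>

static int demo_stack_overran(struct task_struct *tsk)
{
        unsigned long *stackend = end_of_stack(tsk);

        /* init_task's statically set up stack is exempt from the check */
        return tsk != &init_task && *stackend != STACK_END_MAGIC;
}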