Commit 81a07d75 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-64'

* x86-64: (83 commits)
  [PATCH] x86_64: x86_64 stack usage debugging
  [PATCH] x86_64: (resend) x86_64 stack overflow debugging
  [PATCH] x86_64: msi_apic.c build fix
  [PATCH] x86_64: i386/x86-64 Add nmi watchdog support for new Intel CPUs
  [PATCH] x86_64: Avoid broadcasting NMI IPIs
  [PATCH] x86_64: fix apic error on bootup
  [PATCH] x86_64: enlarge window for stack growth
  [PATCH] x86_64: Minor string functions optimizations
  [PATCH] x86_64: Move export symbols to their C functions
  [PATCH] x86_64: Standardize i386/x86_64 handling of NMI_VECTOR
  [PATCH] x86_64: Fix modular pc speaker
  [PATCH] x86_64: remove sys32_ni_syscall()
  [PATCH] x86_64: Do not use -ffunction-sections for modules
  [PATCH] x86_64: Add cpu_relax to apic_wait_icr_idle
  [PATCH] x86_64: adjust kstack_depth_to_print default
  [PATCH] i386/x86-64: adjust /proc/interrupts column headings
  [PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels
  [PATCH] x86_64: Fix fast check in safe_smp_processor_id
  [PATCH] x86_64: x86_64 setup.c - printing cmp related boottime information
  [PATCH] i386/x86-64/ia64: Move polling flag into thread_info_status
  ...

Manual resolve of trivial conflict in arch/i386/kernel/Makefile
parents 8871e73f 8501a2fb
...@@ -205,6 +205,27 @@ IOMMU ...@@ -205,6 +205,27 @@ IOMMU
pages Prereserve that many 128K pages for the software IO bounce buffering. pages Prereserve that many 128K pages for the software IO bounce buffering.
force Force all IO through the software TLB. force Force all IO through the software TLB.
calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
calgary=[translate_empty_slots]
calgary=[disable=<PCI bus number>]
64k,...,8M - Set the size of each PCI slot's translation table
when using the Calgary IOMMU. This is the size of the translation
table itself in main memory. The smallest table, 64k, covers an IO
space of 32MB; the largest, 8MB table, can cover an IO space of
4GB. Normally the kernel will make the right choice by itself.
translate_empty_slots - Enable translation even on slots that have
no devices attached to them, in case a device will be hotplugged
in the future.
disable=<PCI bus number> - Disable translation on a given PHB. For
example, the built-in graphics adapter resides on the first bridge
(PCI bus number 0); if translation (isolation) is enabled on this
bridge, X servers that access the hardware directly from user
space might stop working. Use this option if you have devices that
are accessed from userspace directly on some PCI host bridge.
Debugging Debugging
oops=panic Always panic on oopses. Default is to just kill the process, oops=panic Always panic on oopses. Default is to just kill the process,
......
...@@ -328,6 +328,15 @@ config X86_MCE_P4THERMAL ...@@ -328,6 +328,15 @@ config X86_MCE_P4THERMAL
Enabling this feature will cause a message to be printed when the P4 Enabling this feature will cause a message to be printed when the P4
enters thermal throttling. enters thermal throttling.
config VM86
default y
bool "Enable VM86 support" if EMBEDDED
help
This option is required by programs like DOSEMU to run 16-bit legacy
code on X86 processors. It also may be needed by software like
XFree86 to initialize some video cards via BIOS. Disabling this
option saves about 6k.
config TOSHIBA config TOSHIBA
tristate "Toshiba Laptop support" tristate "Toshiba Laptop support"
---help--- ---help---
...@@ -1068,6 +1077,10 @@ config SCx200HR_TIMER ...@@ -1068,6 +1077,10 @@ config SCx200HR_TIMER
processor goes idle (as is done by the scheduler). The processor goes idle (as is done by the scheduler). The
other workaround is idle=poll boot option. other workaround is idle=poll boot option.
config K8_NB
def_bool y
depends on AGP_AMD64
source "drivers/pcmcia/Kconfig" source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig" source "drivers/pci/hotplug/Kconfig"
......
...@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf ...@@ -109,8 +109,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
isoimage: $(BOOTIMAGE) isoimage: $(BOOTIMAGE)
-rm -rf $(obj)/isoimage -rm -rf $(obj)/isoimage
mkdir $(obj)/isoimage mkdir $(obj)/isoimage
cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ for i in lib lib64 share end ; do \
$(obj)/isoimage if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
done
cp $(BOOTIMAGE) $(obj)/isoimage/linux cp $(BOOTIMAGE) $(obj)/isoimage/linux
echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
if [ -f '$(FDINITRD)' ] ; then \ if [ -f '$(FDINITRD)' ] ; then \
......
...@@ -24,14 +24,6 @@ ...@@ -24,14 +24,6 @@
#undef memset #undef memset
#undef memcpy #undef memcpy
/*
* Why do we do this? Don't ask me..
*
* Incomprehensible are the ways of bootloaders.
*/
static void* memset(void *, int, size_t);
static void* memcpy(void *, __const void *, size_t);
#define memzero(s, n) memset ((s), 0, (n)) #define memzero(s, n) memset ((s), 0, (n))
typedef unsigned char uch; typedef unsigned char uch;
...@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */ ...@@ -93,7 +85,7 @@ static unsigned char *real_mode; /* Pointer to real-mode data */
#endif #endif
#define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0)) #define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
extern char input_data[]; extern unsigned char input_data[];
extern int input_len; extern int input_len;
static long bytes_out = 0; static long bytes_out = 0;
...@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0; ...@@ -103,6 +95,9 @@ static unsigned long output_ptr = 0;
static void *malloc(int size); static void *malloc(int size);
static void free(void *where); static void free(void *where);
static void *memset(void *s, int c, unsigned n);
static void *memcpy(void *dest, const void *src, unsigned n);
static void putstr(const char *); static void putstr(const char *);
extern int end; extern int end;
...@@ -205,7 +200,7 @@ static void putstr(const char *s) ...@@ -205,7 +200,7 @@ static void putstr(const char *s)
outb_p(0xff & (pos >> 1), vidport+1); outb_p(0xff & (pos >> 1), vidport+1);
} }
static void* memset(void* s, int c, size_t n) static void* memset(void* s, int c, unsigned n)
{ {
int i; int i;
char *ss = (char*)s; char *ss = (char*)s;
...@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n) ...@@ -214,14 +209,13 @@ static void* memset(void* s, int c, size_t n)
return s; return s;
} }
static void* memcpy(void* __dest, __const void* __src, static void* memcpy(void* dest, const void* src, unsigned n)
size_t __n)
{ {
int i; int i;
char *d = (char *)__dest, *s = (char *)__src; char *d = (char *)dest, *s = (char *)src;
for (i=0;i<__n;i++) d[i] = s[i]; for (i=0;i<n;i++) d[i] = s[i];
return __dest; return dest;
} }
/* =========================================================================== /* ===========================================================================
...@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void) ...@@ -309,7 +303,7 @@ static void setup_normal_output_buffer(void)
#else #else
if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif #endif
output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */ output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode; free_mem_end_ptr = (long)real_mode;
} }
...@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv) ...@@ -324,11 +318,9 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
#ifdef STANDARD_MEMORY_BIOS_CALL #ifdef STANDARD_MEMORY_BIOS_CALL
if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
#else #else
if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
(3*1024))
error("Less than 4MB of memory");
#endif #endif
mv->low_buffer_start = output_data = (char *)LOW_BUFFER_START; mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff; ? LOW_BUFFER_MAX : (unsigned int)real_mode) & ~0xfff;
low_buffer_size = low_buffer_end - LOW_BUFFER_START; low_buffer_size = low_buffer_end - LOW_BUFFER_START;
......
...@@ -37,6 +37,7 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault.o ...@@ -37,6 +37,7 @@ obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
obj-$(CONFIG_VM86) += vm86.o obj-$(CONFIG_VM86) += vm86.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_HPET_TIMER) += hpet.o obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_K8_NB) += k8.o
EXTRA_AFLAGS := -traditional EXTRA_AFLAGS := -traditional
...@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r ...@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
$(call if_changed,syscall) $(call if_changed,syscall)
k8-y += ../../x86_64/kernel/k8.o
...@@ -4,27 +4,41 @@ ...@@ -4,27 +4,41 @@
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/sections.h> #include <asm/sections.h>
#define DEBUG 0 static int no_replacement = 0;
#if DEBUG static int smp_alt_once = 0;
# define DPRINTK(fmt, args...) printk(fmt, args) static int debug_alternative = 0;
#else
# define DPRINTK(fmt, args...) static int __init noreplacement_setup(char *s)
#endif {
no_replacement = 1;
return 1;
}
static int __init bootonly(char *str)
{
smp_alt_once = 1;
return 1;
}
static int __init debug_alt(char *str)
{
debug_alternative = 1;
return 1;
}
__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);
#define DPRINTK(fmt, args...) if (debug_alternative) \
printk(KERN_DEBUG fmt, args)
#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined /* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot as inline assembly strings in the include files and we cannot
get them easily into strings. */ get them easily into strings. */
asm("\t.data\nintelnops: " asm("\t.data\nintelnops: "
GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
GENERIC_NOP7 GENERIC_NOP8); GENERIC_NOP7 GENERIC_NOP8);
asm("\t.data\nk8nops: " extern unsigned char intelnops[];
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8);
asm("\t.data\nk7nops: "
K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
K7_NOP7 K7_NOP8);
extern unsigned char intelnops[], k8nops[], k7nops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = { static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
NULL, NULL,
intelnops, intelnops,
...@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = { ...@@ -36,6 +50,13 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
intelnops + 1 + 2 + 3 + 4 + 5 + 6, intelnops + 1 + 2 + 3 + 4 + 5 + 6,
intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
}; };
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = { static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
NULL, NULL,
k8nops, k8nops,
...@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = { ...@@ -47,6 +68,13 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
k8nops + 1 + 2 + 3 + 4 + 5 + 6, k8nops + 1 + 2 + 3 + 4 + 5 + 6,
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
}; };
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = { static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
NULL, NULL,
k7nops, k7nops,
...@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = { ...@@ -58,6 +86,18 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
k7nops + 1 + 2 + 3 + 4 + 5 + 6, k7nops + 1 + 2 + 3 + 4 + 5 + 6,
k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
}; };
#endif
#ifdef CONFIG_X86_64
extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
return k8_nops;
}
#else /* CONFIG_X86_64 */
static struct nop { static struct nop {
int cpuid; int cpuid;
unsigned char **noptable; unsigned char **noptable;
...@@ -67,14 +107,6 @@ static struct nop { ...@@ -67,14 +107,6 @@ static struct nop {
{ -1, NULL } { -1, NULL }
}; };
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
extern u8 __smp_alt_begin[], __smp_alt_end[];
static unsigned char** find_nop_table(void) static unsigned char** find_nop_table(void)
{ {
unsigned char **noptable = intel_nops; unsigned char **noptable = intel_nops;
...@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void) ...@@ -89,6 +121,14 @@ static unsigned char** find_nop_table(void)
return noptable; return noptable;
} }
#endif /* CONFIG_X86_64 */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
extern u8 __smp_alt_begin[], __smp_alt_end[];
/* Replace instructions with better alternatives for this CPU type. /* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with This runs before SMP is initialized to avoid SMP problems with
self modifying code. This implies that assymetric systems where self modifying code. This implies that assymetric systems where
...@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) ...@@ -99,6 +139,7 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{ {
unsigned char **noptable = find_nop_table(); unsigned char **noptable = find_nop_table();
struct alt_instr *a; struct alt_instr *a;
u8 *instr;
int diff, i, k; int diff, i, k;
DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end); DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
...@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end) ...@@ -106,7 +147,16 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
BUG_ON(a->replacementlen > a->instrlen); BUG_ON(a->replacementlen > a->instrlen);
if (!boot_cpu_has(a->cpuid)) if (!boot_cpu_has(a->cpuid))
continue; continue;
memcpy(a->instr, a->replacement, a->replacementlen); instr = a->instr;
#ifdef CONFIG_X86_64
/* vsyscall code is not mapped yet. resolve it manually. */
if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
DPRINTK("%s: vsyscall fixup: %p => %p\n",
__FUNCTION__, a->instr, instr);
}
#endif
memcpy(instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen; diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */ /* Pad the rest with nops */
for (i = a->replacementlen; diff > 0; diff -= k, i += k) { for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
...@@ -186,14 +236,6 @@ struct smp_alt_module { ...@@ -186,14 +236,6 @@ struct smp_alt_module {
static LIST_HEAD(smp_alt_modules); static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt); static DEFINE_SPINLOCK(smp_alt);
static int smp_alt_once = 0;
static int __init bootonly(char *str)
{
smp_alt_once = 1;
return 1;
}
__setup("smp-alt-boot", bootonly);
void alternatives_smp_module_add(struct module *mod, char *name, void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end, void *locks, void *locks_end,
void *text, void *text_end) void *text, void *text_end)
...@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name, ...@@ -201,6 +243,9 @@ void alternatives_smp_module_add(struct module *mod, char *name,
struct smp_alt_module *smp; struct smp_alt_module *smp;
unsigned long flags; unsigned long flags;
if (no_replacement)
return;
if (smp_alt_once) { if (smp_alt_once) {
if (boot_cpu_has(X86_FEATURE_UP)) if (boot_cpu_has(X86_FEATURE_UP))
alternatives_smp_unlock(locks, locks_end, alternatives_smp_unlock(locks, locks_end,
...@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod) ...@@ -235,7 +280,7 @@ void alternatives_smp_module_del(struct module *mod)
struct smp_alt_module *item; struct smp_alt_module *item;
unsigned long flags; unsigned long flags;
if (smp_alt_once) if (no_replacement || smp_alt_once)
return; return;
spin_lock_irqsave(&smp_alt, flags); spin_lock_irqsave(&smp_alt, flags);
...@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp) ...@@ -256,7 +301,7 @@ void alternatives_smp_switch(int smp)
struct smp_alt_module *mod; struct smp_alt_module *mod;
unsigned long flags; unsigned long flags;
if (smp_alt_once) if (no_replacement || smp_alt_once)
return; return;
BUG_ON(!smp && (num_online_cpus() > 1)); BUG_ON(!smp && (num_online_cpus() > 1));
...@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp) ...@@ -285,6 +330,13 @@ void alternatives_smp_switch(int smp)
void __init alternative_instructions(void) void __init alternative_instructions(void)
{ {
if (no_replacement) {
printk(KERN_INFO "(SMP-)alternatives turned off\n");
free_init_pages("SMP alternatives",
(unsigned long)__smp_alt_begin,
(unsigned long)__smp_alt_end);
return;
}
apply_alternatives(__alt_instructions, __alt_instructions_end); apply_alternatives(__alt_instructions, __alt_instructions_end);
/* switch to patch-once-at-boottime-only mode and free the /* switch to patch-once-at-boottime-only mode and free the
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include <asm/hpet.h> #include <asm/hpet.h>
#include <asm/i8253.h> #include <asm/i8253.h>
#include <asm/nmi.h>
#include <mach_apic.h> #include <mach_apic.h>
#include <mach_apicdef.h> #include <mach_apicdef.h>
...@@ -156,7 +157,7 @@ void clear_local_APIC(void) ...@@ -156,7 +157,7 @@ void clear_local_APIC(void)
maxlvt = get_maxlvt(); maxlvt = get_maxlvt();
/* /*
* Masking an LVT entry on a P6 can trigger a local APIC error * Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this. * if the vector is zero. Mask LVTERR first to prevent this.
*/ */
if (maxlvt >= 3) { if (maxlvt >= 3) {
...@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void) ...@@ -1117,7 +1118,18 @@ void disable_APIC_timer(void)
unsigned long v; unsigned long v;
v = apic_read(APIC_LVTT); v = apic_read(APIC_LVTT);
apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED); /*
* When an illegal vector value (0-15) is written to an LVT
* entry and delivery mode is Fixed, the APIC may signal an
* illegal vector error, with out regard to whether the mask
* bit is set or whether an interrupt is actually seen on input.
*
* Boot sequence might call this function when the LVTT has
* '0' vector value. So make sure vector field is set to
* valid value.
*/
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write_around(APIC_LVTT, v);
} }
} }
......
...@@ -764,9 +764,9 @@ static int apm_do_idle(void) ...@@ -764,9 +764,9 @@ static int apm_do_idle(void)
int idled = 0; int idled = 0;
int polling; int polling;
polling = test_thread_flag(TIF_POLLING_NRFLAG); polling = !!(current_thread_info()->status & TS_POLLING);
if (polling) { if (polling) {
clear_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
} }
if (!need_resched()) { if (!need_resched()) {
...@@ -774,7 +774,7 @@ static int apm_do_idle(void) ...@@ -774,7 +774,7 @@ static int apm_do_idle(void)
ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax); ret = apm_bios_call_simple(APM_FUNC_IDLE, 0, 0, &eax);
} }
if (polling) if (polling)
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
if (!idled) if (!idled)
return 0; return 0;
......
...@@ -224,15 +224,17 @@ static void __init init_amd(struct cpuinfo_x86 *c) ...@@ -224,15 +224,17 @@ static void __init init_amd(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_HT #ifdef CONFIG_X86_HT
/* /*
* On a AMD dual core setup the lower bits of the APIC id * On a AMD multi core setup the lower bits of the APIC id
* distingush the cores. Assumes number of cores is a power * distingush the cores.
* of two.
*/ */
if (c->x86_max_cores > 1) { if (c->x86_max_cores > 1) {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
unsigned bits = 0; unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;
while ((1 << bits) < c->x86_max_cores)
bits++; if (bits == 0) {
while ((1 << bits) < c->x86_max_cores)
bits++;
}
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits; phys_proc_id[cpu] >>= bits;
printk(KERN_INFO "CPU %d(%d) -> Core %d\n", printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
...@@ -240,6 +242,8 @@ static void __init init_amd(struct cpuinfo_x86 *c) ...@@ -240,6 +242,8 @@ static void __init init_amd(struct cpuinfo_x86 *c)
} }
#endif #endif
if (cpuid_eax(0x80000000) >= 0x80000006)
num_cache_leaves = 3;
} }
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size) static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
......
...@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) ...@@ -122,6 +122,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
select_idle_routine(c); select_idle_routine(c);
l2 = init_intel_cacheinfo(c); l2 = init_intel_cacheinfo(c);
if (c->cpuid_level > 9 ) {
unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */
if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
set_bit(X86_FEATURE_ARCH_PERFMON, c->x86_capability);
}
/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */ /* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
* Changes: * Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
* Andi Kleen : CPUID4 emulation on AMD.
*/ */
#include <linux/init.h> #include <linux/init.h>
...@@ -130,25 +131,111 @@ struct _cpuid4_info { ...@@ -130,25 +131,111 @@ struct _cpuid4_info {
cpumask_t shared_cpu_map; cpumask_t shared_cpu_map;
}; };
static unsigned short num_cache_leaves; unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
information to the user. This makes some assumptions about the machine:
No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
In theory the TLBs could be reported as fake type (they are in "dummy").
Maybe later */
union l1_cache {
struct {
unsigned line_size : 8;
unsigned lines_per_tag : 8;
unsigned assoc : 8;
unsigned size_in_kb : 8;
};
unsigned val;
};
union l2_cache {
struct {
unsigned line_size : 8;
unsigned lines_per_tag : 4;
unsigned assoc : 4;
unsigned size_in_kb : 16;
};
unsigned val;
};
static unsigned short assocs[] = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
[8] = 16,
[0xf] = 0xffff // ??
};
static unsigned char levels[] = { 1, 1, 2 };
static unsigned char types[] = { 1, 2, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
union _cpuid4_leaf_ecx *ecx)
{
unsigned dummy;
unsigned line_size, lines_per_tag, assoc, size_in_kb;
union l1_cache l1i, l1d;
union l2_cache l2;
eax->full = 0;
ebx->full = 0;
ecx->full = 0;
cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
return;
eax->split.is_self_initializing = 1;
eax->split.type = types[leaf];
eax->split.level = levels[leaf];
eax->split.num_threads_sharing = 0;
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
if (leaf <= 1) {
union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
assoc = l1->assoc;
line_size = l1->line_size;
lines_per_tag = l1->lines_per_tag;
size_in_kb = l1->size_in_kb;
} else {
assoc = l2.assoc;
line_size = l2.line_size;
lines_per_tag = l2.lines_per_tag;
/* cpu_data has errata corrections for K7 applied */
size_in_kb = current_cpu_data.x86_cache_size;
}
if (assoc == 0xf)
eax->split.is_fully_associative = 1;
ebx->split.coherency_line_size = line_size - 1;
ebx->split.ways_of_associativity = assocs[assoc] - 1;
ebx->split.physical_line_partition = lines_per_tag - 1;
ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
(ebx->split.ways_of_associativity + 1) - 1;
}
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{ {
unsigned int eax, ebx, ecx, edx; union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_eax cache_eax; union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
unsigned edx;
cpuid_count(4, index, &eax, &ebx, &ecx, &edx); if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
cache_eax.full = eax; amd_cpuid4(index, &eax, &ebx, &ecx);
if (cache_eax.split.type == CACHE_TYPE_NULL) else
cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
if (eax.split.type == CACHE_TYPE_NULL)
return -EIO; /* better error ? */ return -EIO; /* better error ? */
this_leaf->eax.full = eax; this_leaf->eax = eax;
this_leaf->ebx.full = ebx; this_leaf->ebx = ebx;
this_leaf->ecx.full = ecx; this_leaf->ecx = ecx;
this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) * this_leaf->size = (ecx.split.number_of_sets + 1) *
(this_leaf->ebx.split.coherency_line_size + 1) * (ebx.split.coherency_line_size + 1) *
(this_leaf->ebx.split.physical_line_partition + 1) * (ebx.split.physical_line_partition + 1) *
(this_leaf->ebx.split.ways_of_associativity + 1); (ebx.split.ways_of_associativity + 1);
return 0; return 0;
} }
......
...@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) ...@@ -120,14 +120,9 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
return 1; return 1;
} }
/*
* By using the NMI code instead of a vector we just sneak thru the
* word generator coming out with just what we want. AND it does
* not matter if clustered_apic_mode is set or not.
*/
static void smp_send_nmi_allbutself(void) static void smp_send_nmi_allbutself(void)
{ {
send_IPI_allbutself(APIC_DM_NMI); send_IPI_allbutself(NMI_VECTOR);
} }
static void nmi_shootdown_cpus(void) static void nmi_shootdown_cpus(void)
......
This diff is collapsed.
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/timer.h> #include <asm/timer.h>
#include <asm/i8259.h> #include <asm/i8259.h>
#include <asm/nmi.h>
#include <mach_apic.h> #include <mach_apic.h>
...@@ -50,6 +51,7 @@ atomic_t irq_mis_count; ...@@ -50,6 +51,7 @@ atomic_t irq_mis_count;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock); static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
int timer_over_8254 __initdata = 1; int timer_over_8254 __initdata = 1;
...@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; ...@@ -1161,10 +1163,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq) int assign_irq_vector(int irq)
{ {
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
unsigned long flags;
int vector;
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
BUG_ON(irq >= NR_IRQ_VECTORS); spin_lock_irqsave(&vector_lock, flags);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
spin_unlock_irqrestore(&vector_lock, flags);
return IO_APIC_VECTOR(irq); return IO_APIC_VECTOR(irq);
}
next: next:
current_vector += 8; current_vector += 8;
if (current_vector == SYSCALL_VECTOR) if (current_vector == SYSCALL_VECTOR)
...@@ -1172,16 +1181,21 @@ int assign_irq_vector(int irq) ...@@ -1172,16 +1181,21 @@ int assign_irq_vector(int irq)
if (current_vector >= FIRST_SYSTEM_VECTOR) { if (current_vector >= FIRST_SYSTEM_VECTOR) {
offset++; offset++;
if (!(offset%8)) if (!(offset%8)) {
spin_unlock_irqrestore(&vector_lock, flags);
return -ENOSPC; return -ENOSPC;
}
current_vector = FIRST_DEVICE_VECTOR + offset; current_vector = FIRST_DEVICE_VECTOR + offset;
} }
vector_irq[current_vector] = irq; vector = current_vector;
vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN) if (irq != AUTO_ASSIGN)
IO_APIC_VECTOR(irq) = current_vector; IO_APIC_VECTOR(irq) = vector;
return current_vector; spin_unlock_irqrestore(&vector_lock, flags);
return vector;
} }
static struct hw_interrupt_type ioapic_level_type; static struct hw_interrupt_type ioapic_level_type;
...@@ -1193,21 +1207,14 @@ static struct hw_interrupt_type ioapic_edge_type; ...@@ -1193,21 +1207,14 @@ static struct hw_interrupt_type ioapic_edge_type;
static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{ {
if (use_pci_vector() && !platform_legacy_irq(irq)) { unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL) if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
irq_desc[vector].handler = &ioapic_level_type; trigger == IOAPIC_LEVEL)
else irq_desc[idx].handler = &ioapic_level_type;
irq_desc[vector].handler = &ioapic_edge_type; else
set_intr_gate(vector, interrupt[vector]); irq_desc[idx].handler = &ioapic_edge_type;
} else { set_intr_gate(vector, interrupt[idx]);
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
irq_desc[irq].handler = &ioapic_level_type;
else
irq_desc[irq].handler = &ioapic_edge_type;
set_intr_gate(vector, interrupt[irq]);
}
} }
static void __init setup_IO_APIC_irqs(void) static void __init setup_IO_APIC_irqs(void)
......
...@@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -227,7 +227,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) { if (i == 0) {
seq_printf(p, " "); seq_printf(p, " ");
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "CPU%d ",j); seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n'); seq_putc(p, '\n');
} }
......
...@@ -14,21 +14,17 @@ ...@@ -14,21 +14,17 @@
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/sysdev.h> #include <linux/sysdev.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/percpu.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/div64.h>
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/intel_arch_perfmon.h>
#include "mach_traps.h" #include "mach_traps.h"
...@@ -100,6 +96,9 @@ int nmi_active; ...@@ -100,6 +96,9 @@ int nmi_active;
(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when /* The performance counters used by NMI_LOCAL_APIC don't trigger when
* the CPU is idle. To make sure the NMI watchdog really ticks on all * the CPU is idle. To make sure the NMI watchdog really ticks on all
...@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str) ...@@ -212,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog); __setup("nmi_watchdog=", setup_nmi_watchdog);
static void disable_intel_arch_watchdog(void);
static void disable_lapic_nmi_watchdog(void) static void disable_lapic_nmi_watchdog(void)
{ {
if (nmi_active <= 0) if (nmi_active <= 0)
...@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void) ...@@ -221,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
wrmsr(MSR_K7_EVNTSEL0, 0, 0); wrmsr(MSR_K7_EVNTSEL0, 0, 0);
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
disable_intel_arch_watchdog();
break;
}
switch (boot_cpu_data.x86) { switch (boot_cpu_data.x86) {
case 6: case 6:
if (boot_cpu_data.x86_model > 0xd) if (boot_cpu_data.x86_model > 0xd)
...@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void) ...@@ -449,6 +454,53 @@ static int setup_p4_watchdog(void)
return 1; return 1;
} }
static void disable_intel_arch_watchdog(void)
{
unsigned ebx;
/*
* Check whether the Architectural PerfMon supports
* Unhalted Core Cycles Event or not.
* NOTE: Corresponding bit = 0 in ebp indicates event present.
*/
ebx = cpuid_ebx(10);
if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}
static int setup_intel_arch_watchdog(void)
{
unsigned int evntsel;
unsigned ebx;
/*
* Check whether the Architectural PerfMon supports
* Unhalted Core Cycles Event or not.
* NOTE: Corresponding bit = 0 in ebp indicates event present.
*/
ebx = cpuid_ebx(10);
if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
return 0;
nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
evntsel = ARCH_PERFMON_EVENTSEL_INT
| ARCH_PERFMON_EVENTSEL_OS
| ARCH_PERFMON_EVENTSEL_USR
| ARCH_PERFMON_NMI_EVENT_SEL
| ARCH_PERFMON_NMI_EVENT_UMASK;
wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
write_watchdog_counter("INTEL_ARCH_PERFCTR0");
apic_write(APIC_LVTPC, APIC_DM_NMI);
evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
return 1;
}
void setup_apic_nmi_watchdog (void) void setup_apic_nmi_watchdog (void)
{ {
switch (boot_cpu_data.x86_vendor) { switch (boot_cpu_data.x86_vendor) {
...@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void) ...@@ -458,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
setup_k7_watchdog(); setup_k7_watchdog();
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
if (!setup_intel_arch_watchdog())
return;
break;
}
switch (boot_cpu_data.x86) { switch (boot_cpu_data.x86) {
case 6: case 6:
if (boot_cpu_data.x86_model > 0xd) if (boot_cpu_data.x86_model > 0xd)
...@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs) ...@@ -561,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
} }
else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) { else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
/* Only P6 based Pentium M need to re-unmask /* Only P6 based Pentium M need to re-unmask
* the apic vector but it doesn't hurt * the apic vector but it doesn't hurt
* other P6 variant */ * other P6 variant */
......
...@@ -102,7 +102,7 @@ void default_idle(void) ...@@ -102,7 +102,7 @@ void default_idle(void)
local_irq_enable(); local_irq_enable();
if (!hlt_counter && boot_cpu_data.hlt_works_ok) { if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
clear_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
while (!need_resched()) { while (!need_resched()) {
local_irq_disable(); local_irq_disable();
...@@ -111,7 +111,7 @@ void default_idle(void) ...@@ -111,7 +111,7 @@ void default_idle(void)
else else
local_irq_enable(); local_irq_enable();
} }
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
} else { } else {
while (!need_resched()) while (!need_resched())
cpu_relax(); cpu_relax();
...@@ -174,7 +174,7 @@ void cpu_idle(void) ...@@ -174,7 +174,7 @@ void cpu_idle(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
...@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs) ...@@ -312,7 +312,7 @@ void show_regs(struct pt_regs * regs)
cr3 = read_cr3(); cr3 = read_cr3();
cr4 = read_cr4_safe(); cr4 = read_cr4_safe();
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
show_trace(NULL, &regs->esp); show_trace(NULL, regs, &regs->esp);
} }
/* /*
......
...@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m ...@@ -114,7 +114,17 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
static inline int __prepare_ICR (unsigned int shortcut, int vector) static inline int __prepare_ICR (unsigned int shortcut, int vector)
{ {
return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL; unsigned int icr = shortcut | APIC_DEST_LOGICAL;
switch (vector) {
default:
icr |= APIC_DM_FIXED | vector;
break;
case NMI_VECTOR:
icr |= APIC_DM_NMI;
break;
}
return icr;
} }
static inline int __prepare_ICR2 (unsigned int mask) static inline int __prepare_ICR2 (unsigned int mask)
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include <asm/nmi.h>
#include <mach_apic.h> #include <mach_apic.h>
#include <mach_wakecpu.h> #include <mach_wakecpu.h>
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/kexec.h> #include <linux/kexec.h>
#include <linux/unwind.h>
#ifdef CONFIG_EISA #ifdef CONFIG_EISA
#include <linux/ioport.h> #include <linux/ioport.h>
...@@ -47,7 +48,7 @@ ...@@ -47,7 +48,7 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/i387.h> #include <asm/i387.h>
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/unwind.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include <asm/kdebug.h> #include <asm/kdebug.h>
...@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void); ...@@ -92,6 +93,7 @@ asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void); asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24; static int kstack_depth_to_print = 24;
static int call_trace = 1;
ATOMIC_NOTIFIER_HEAD(i386die_chain); ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb) int register_die_notifier(struct notifier_block *nb)
...@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo, ...@@ -170,7 +172,23 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
return ebp; return ebp;
} }
static void show_trace_log_lvl(struct task_struct *task, static asmlinkage int show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
{
int n = 0;
int printed = 0; /* nr of entries already printed on current line */
while (unwind(info) == 0 && UNW_PC(info)) {
++n;
printed = print_addr_and_symbol(UNW_PC(info), log_lvl, printed);
if (arch_unw_user_mode(info))
break;
}
if (printed)
printk("\n");
return n;
}
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl) unsigned long *stack, char *log_lvl)
{ {
unsigned long ebp; unsigned long ebp;
...@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task, ...@@ -178,6 +196,26 @@ static void show_trace_log_lvl(struct task_struct *task,
if (!task) if (!task)
task = current; task = current;
if (call_trace >= 0) {
int unw_ret = 0;
struct unwind_frame_info info;
if (regs) {
if (unwind_init_frame_info(&info, task, regs) == 0)
unw_ret = show_trace_unwind(&info, log_lvl);
} else if (task == current)
unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
else {
if (unwind_init_blocked(&info, task) == 0)
unw_ret = show_trace_unwind(&info, log_lvl);
}
if (unw_ret > 0) {
if (call_trace > 0)
return;
printk("%sLegacy call trace:\n", log_lvl);
}
}
if (task == current) { if (task == current) {
/* Grab ebp right from our regs */ /* Grab ebp right from our regs */
asm ("movl %%ebp, %0" : "=r" (ebp) : ); asm ("movl %%ebp, %0" : "=r" (ebp) : );
...@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task, ...@@ -198,13 +236,13 @@ static void show_trace_log_lvl(struct task_struct *task,
} }
} }
void show_trace(struct task_struct *task, unsigned long * stack) void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
{ {
show_trace_log_lvl(task, stack, ""); show_trace_log_lvl(task, regs, stack, "");
} }
static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
char *log_lvl) unsigned long *esp, char *log_lvl)
{ {
unsigned long *stack; unsigned long *stack;
int i; int i;
...@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, ...@@ -225,13 +263,13 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp,
printk("%08lx ", *stack++); printk("%08lx ", *stack++);
} }
printk("\n%sCall Trace:\n", log_lvl); printk("\n%sCall Trace:\n", log_lvl);
show_trace_log_lvl(task, esp, log_lvl); show_trace_log_lvl(task, regs, esp, log_lvl);
} }
void show_stack(struct task_struct *task, unsigned long *esp) void show_stack(struct task_struct *task, unsigned long *esp)
{ {
printk(" "); printk(" ");
show_stack_log_lvl(task, esp, ""); show_stack_log_lvl(task, NULL, esp, "");
} }
/* /*
...@@ -241,7 +279,7 @@ void dump_stack(void) ...@@ -241,7 +279,7 @@ void dump_stack(void)
{ {
unsigned long stack; unsigned long stack;
show_trace(current, &stack); show_trace(current, NULL, &stack);
} }
EXPORT_SYMBOL(dump_stack); EXPORT_SYMBOL(dump_stack);
...@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs) ...@@ -285,7 +323,7 @@ void show_registers(struct pt_regs *regs)
u8 __user *eip; u8 __user *eip;
printk("\n" KERN_EMERG "Stack: "); printk("\n" KERN_EMERG "Stack: ");
show_stack_log_lvl(NULL, (unsigned long *)esp, KERN_EMERG); show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
printk(KERN_EMERG "Code: "); printk(KERN_EMERG "Code: ");
...@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s) ...@@ -1215,3 +1253,15 @@ static int __init kstack_setup(char *s)
return 1; return 1;
} }
__setup("kstack=", kstack_setup); __setup("kstack=", kstack_setup);
static int __init call_trace_setup(char *s)
{
if (strcmp(s, "old") == 0)
call_trace = -1;
else if (strcmp(s, "both") == 0)
call_trace = 0;
else if (strcmp(s, "new") == 0)
call_trace = 1;
return 1;
}
__setup("call_trace=", call_trace_setup);
...@@ -71,6 +71,15 @@ SECTIONS ...@@ -71,6 +71,15 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) } .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
_edata = .; /* End of data section */ _edata = .; /* End of data section */
#ifdef CONFIG_STACK_UNWIND
. = ALIGN(4);
.eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) {
__start_unwind = .;
*(.eh_frame)
__end_unwind = .;
}
#endif
. = ALIGN(THREAD_SIZE); /* init_task */ . = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) { .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
*(.data.init_task) *(.data.init_task)
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/oprofile.h> #include <linux/oprofile.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/nmi.h>
#include "op_x86_model.h" #include "op_x86_model.h"
#include "op_counter.h" #include "op_counter.h"
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/nmi.h>
#include "op_x86_model.h" #include "op_x86_model.h"
#include "op_counter.h" #include "op_counter.h"
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/nmi.h>
#include "op_x86_model.h" #include "op_x86_model.h"
#include "op_counter.h" #include "op_counter.h"
......
...@@ -272,9 +272,9 @@ cpu_idle (void) ...@@ -272,9 +272,9 @@ cpu_idle (void)
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
if (can_do_pal_halt) if (can_do_pal_halt)
clear_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status &= ~TS_POLLING;
else else
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
if (!need_resched()) { if (!need_resched()) {
void (*idle)(void); void (*idle)(void);
......
...@@ -386,24 +386,45 @@ config HPET_EMULATE_RTC ...@@ -386,24 +386,45 @@ config HPET_EMULATE_RTC
bool "Provide RTC interrupt" bool "Provide RTC interrupt"
depends on HPET_TIMER && RTC=y depends on HPET_TIMER && RTC=y
config GART_IOMMU # Mark as embedded because too many people got it wrong.
bool "K8 GART IOMMU support" # The code disables itself when not needed.
config IOMMU
bool "IOMMU support" if EMBEDDED
default y default y
select SWIOTLB select SWIOTLB
select AGP select AGP
depends on PCI depends on PCI
help help
Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors Support for full DMA access of devices with 32bit memory access only
and for the bounce buffering software IOMMU. on systems with more than 3GB. This is usually needed for USB,
Needed to run systems with more than 3GB of memory properly with sound, many IDE/SATA chipsets and some other devices.
32-bit PCI devices that do not support DAC (Double Address Cycle). Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
The IOMMU can be turned off at runtime with the iommu=off parameter. based IOMMU and a software bounce buffer based IOMMU used on Intel
Normally the kernel will take the right choice by itself. systems and as fallback.
This option includes a driver for the AMD Opteron/Athlon64 IOMMU The code is only active when needed (enough memory and limited
northbridge and a software emulation used on other systems without device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
hardware IOMMU. If unsure, say Y. too.
# need this always selected by GART_IOMMU for the VIA workaround config CALGARY_IOMMU
bool "IBM Calgary IOMMU support"
default y
select SWIOTLB
depends on PCI && EXPERIMENTAL
help
Support for hardware IOMMUs in IBM's xSeries x366 and x460
systems. Needed to run systems with more than 3GB of memory
properly with 32-bit PCI devices that do not support DAC
(Double Address Cycle). Calgary also supports bus level
isolation, where all DMAs pass through the IOMMU. This
prevents them from going anywhere except their intended
destination. This catches hard-to-find kernel bugs and
mis-behaving drivers and devices that do not use the DMA-API
properly to set up their DMA buffers. The IOMMU can be
turned off at boot time with the iommu=off parameter.
Normally the kernel will make the right choice by itself.
If unsure, say Y.
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB config SWIOTLB
bool bool
...@@ -501,6 +522,10 @@ config REORDER ...@@ -501,6 +522,10 @@ config REORDER
optimal TLB usage. If you have pretty much any version of binutils, optimal TLB usage. If you have pretty much any version of binutils,
this can increase your kernel build time by roughly one minute. this can increase your kernel build time by roughly one minute.
config K8_NB
def_bool y
depends on AGP_AMD64 || IOMMU || (PCI && NUMA)
endmenu endmenu
# #
......
...@@ -13,7 +13,7 @@ config DEBUG_RODATA ...@@ -13,7 +13,7 @@ config DEBUG_RODATA
If in doubt, say "N". If in doubt, say "N".
config IOMMU_DEBUG config IOMMU_DEBUG
depends on GART_IOMMU && DEBUG_KERNEL depends on IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging" bool "Enable IOMMU debugging"
help help
Force the IOMMU to on even when you have less than 4GB of Force the IOMMU to on even when you have less than 4GB of
...@@ -35,6 +35,22 @@ config IOMMU_LEAK ...@@ -35,6 +35,22 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings. are debugging a buggy device driver that leaks IOMMU mappings.
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit.
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
This option will slow down process creation somewhat.
#config X86_REMOTE_DEBUG #config X86_REMOTE_DEBUG
# bool "kgdb debugging stub" # bool "kgdb debugging stub"
......
...@@ -27,6 +27,7 @@ LDFLAGS_vmlinux := ...@@ -27,6 +27,7 @@ LDFLAGS_vmlinux :=
CHECKFLAGS += -D__x86_64__ -m64 CHECKFLAGS += -D__x86_64__ -m64
cflags-y := cflags-y :=
cflags-kernel-y :=
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8) cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
...@@ -35,7 +36,7 @@ cflags-y += -m64 ...@@ -35,7 +36,7 @@ cflags-y += -m64
cflags-y += -mno-red-zone cflags-y += -mno-red-zone
cflags-y += -mcmodel=kernel cflags-y += -mcmodel=kernel
cflags-y += -pipe cflags-y += -pipe
cflags-$(CONFIG_REORDER) += -ffunction-sections cflags-kernel-$(CONFIG_REORDER) += -ffunction-sections
# this makes reading assembly source easier, but produces worse code # this makes reading assembly source easier, but produces worse code
# actually it makes the kernel smaller too. # actually it makes the kernel smaller too.
cflags-y += -fno-reorder-blocks cflags-y += -fno-reorder-blocks
...@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time) ...@@ -55,6 +56,7 @@ cflags-y += $(call cc-option,-funit-at-a-time)
cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
CFLAGS += $(cflags-y) CFLAGS += $(cflags-y)
CFLAGS_KERNEL += $(cflags-kernel-y)
AFLAGS += -m64 AFLAGS += -m64
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
......
...@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf ...@@ -107,8 +107,13 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
isoimage: $(BOOTIMAGE) isoimage: $(BOOTIMAGE)
-rm -rf $(obj)/isoimage -rm -rf $(obj)/isoimage
mkdir $(obj)/isoimage mkdir $(obj)/isoimage
cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ for i in lib lib64 share end ; do \
$(obj)/isoimage if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
done
cp $(BOOTIMAGE) $(obj)/isoimage/linux cp $(BOOTIMAGE) $(obj)/isoimage/linux
echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
if [ -f '$(FDINITRD)' ] ; then \ if [ -f '$(FDINITRD)' ] ; then \
......
...@@ -77,11 +77,11 @@ static void gzip_release(void **); ...@@ -77,11 +77,11 @@ static void gzip_release(void **);
*/ */
static unsigned char *real_mode; /* Pointer to real-mode data */ static unsigned char *real_mode; /* Pointer to real-mode data */
#define EXT_MEM_K (*(unsigned short *)(real_mode + 0x2)) #define RM_EXT_MEM_K (*(unsigned short *)(real_mode + 0x2))
#ifndef STANDARD_MEMORY_BIOS_CALL #ifndef STANDARD_MEMORY_BIOS_CALL
#define ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0)) #define RM_ALT_MEM_K (*(unsigned long *)(real_mode + 0x1e0))
#endif #endif
#define SCREEN_INFO (*(struct screen_info *)(real_mode+0)) #define RM_SCREEN_INFO (*(struct screen_info *)(real_mode+0))
extern unsigned char input_data[]; extern unsigned char input_data[];
extern int input_len; extern int input_len;
...@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0; ...@@ -92,9 +92,9 @@ static unsigned long output_ptr = 0;
static void *malloc(int size); static void *malloc(int size);
static void free(void *where); static void free(void *where);
void* memset(void* s, int c, unsigned n); static void *memset(void *s, int c, unsigned n);
void* memcpy(void* dest, const void* src, unsigned n); static void *memcpy(void *dest, const void *src, unsigned n);
static void putstr(const char *); static void putstr(const char *);
...@@ -162,8 +162,8 @@ static void putstr(const char *s) ...@@ -162,8 +162,8 @@ static void putstr(const char *s)
int x,y,pos; int x,y,pos;
char c; char c;
x = SCREEN_INFO.orig_x; x = RM_SCREEN_INFO.orig_x;
y = SCREEN_INFO.orig_y; y = RM_SCREEN_INFO.orig_y;
while ( ( c = *s++ ) != '\0' ) { while ( ( c = *s++ ) != '\0' ) {
if ( c == '\n' ) { if ( c == '\n' ) {
...@@ -184,8 +184,8 @@ static void putstr(const char *s) ...@@ -184,8 +184,8 @@ static void putstr(const char *s)
} }
} }
SCREEN_INFO.orig_x = x; RM_SCREEN_INFO.orig_x = x;
SCREEN_INFO.orig_y = y; RM_SCREEN_INFO.orig_y = y;
pos = (x + cols * y) * 2; /* Update cursor position */ pos = (x + cols * y) * 2; /* Update cursor position */
outb_p(14, vidport); outb_p(14, vidport);
...@@ -194,7 +194,7 @@ static void putstr(const char *s) ...@@ -194,7 +194,7 @@ static void putstr(const char *s)
outb_p(0xff & (pos >> 1), vidport+1); outb_p(0xff & (pos >> 1), vidport+1);
} }
void* memset(void* s, int c, unsigned n) static void* memset(void* s, int c, unsigned n)
{ {
int i; int i;
char *ss = (char*)s; char *ss = (char*)s;
...@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n) ...@@ -203,7 +203,7 @@ void* memset(void* s, int c, unsigned n)
return s; return s;
} }
void* memcpy(void* dest, const void* src, unsigned n) static void* memcpy(void* dest, const void* src, unsigned n)
{ {
int i; int i;
char *d = (char *)dest, *s = (char *)src; char *d = (char *)dest, *s = (char *)src;
...@@ -278,15 +278,15 @@ static void error(char *x) ...@@ -278,15 +278,15 @@ static void error(char *x)
putstr(x); putstr(x);
putstr("\n\n -- System halted"); putstr("\n\n -- System halted");
while(1); while(1); /* Halt */
} }
void setup_normal_output_buffer(void) static void setup_normal_output_buffer(void)
{ {
#ifdef STANDARD_MEMORY_BIOS_CALL #ifdef STANDARD_MEMORY_BIOS_CALL
if (EXT_MEM_K < 1024) error("Less than 2MB of memory"); if (RM_EXT_MEM_K < 1024) error("Less than 2MB of memory");
#else #else
if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory"); if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif #endif
output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */ output_data = (unsigned char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode; free_mem_end_ptr = (long)real_mode;
...@@ -297,13 +297,13 @@ struct moveparams { ...@@ -297,13 +297,13 @@ struct moveparams {
uch *high_buffer_start; int hcount; uch *high_buffer_start; int hcount;
}; };
void setup_output_buffer_if_we_run_high(struct moveparams *mv) static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
{ {
high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE); high_buffer_start = (uch *)(((ulg)&end) + HEAP_SIZE);
#ifdef STANDARD_MEMORY_BIOS_CALL #ifdef STANDARD_MEMORY_BIOS_CALL
if (EXT_MEM_K < (3*1024)) error("Less than 4MB of memory"); if (RM_EXT_MEM_K < (3*1024)) error("Less than 4MB of memory");
#else #else
if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory"); if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < (3*1024)) error("Less than 4MB of memory");
#endif #endif
mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START; mv->low_buffer_start = output_data = (unsigned char *)LOW_BUFFER_START;
low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX low_buffer_end = ((unsigned int)real_mode > LOW_BUFFER_MAX
...@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv) ...@@ -319,7 +319,7 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
mv->high_buffer_start = high_buffer_start; mv->high_buffer_start = high_buffer_start;
} }
void close_output_buffer_if_we_run_high(struct moveparams *mv) static void close_output_buffer_if_we_run_high(struct moveparams *mv)
{ {
if (bytes_out > low_buffer_size) { if (bytes_out > low_buffer_size) {
mv->lcount = low_buffer_size; mv->lcount = low_buffer_size;
...@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode) ...@@ -335,7 +335,7 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
{ {
real_mode = rmode; real_mode = rmode;
if (SCREEN_INFO.orig_video_mode == 7) { if (RM_SCREEN_INFO.orig_video_mode == 7) {
vidmem = (char *) 0xb0000; vidmem = (char *) 0xb0000;
vidport = 0x3b4; vidport = 0x3b4;
} else { } else {
...@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode) ...@@ -343,8 +343,8 @@ int decompress_kernel(struct moveparams *mv, void *rmode)
vidport = 0x3d4; vidport = 0x3d4;
} }
lines = SCREEN_INFO.orig_video_lines; lines = RM_SCREEN_INFO.orig_video_lines;
cols = SCREEN_INFO.orig_video_cols; cols = RM_SCREEN_INFO.orig_video_cols;
if (free_mem_ptr < 0x100000) setup_normal_output_buffer(); if (free_mem_ptr < 0x100000) setup_normal_output_buffer();
else setup_output_buffer_if_we_run_high(mv); else setup_output_buffer_if_we_run_high(mv);
......
...@@ -149,10 +149,8 @@ int main(int argc, char ** argv) ...@@ -149,10 +149,8 @@ int main(int argc, char ** argv)
sz = sb.st_size; sz = sb.st_size;
fprintf (stderr, "System is %d kB\n", sz/1024); fprintf (stderr, "System is %d kB\n", sz/1024);
sys_size = (sz + 15) / 16; sys_size = (sz + 15) / 16;
/* 0x40000*16 = 4.0 MB, reasonable estimate for the current maximum */ if (!is_big_kernel && sys_size > DEF_SYSSIZE)
if (sys_size > (is_big_kernel ? 0x40000 : DEF_SYSSIZE)) die("System is too big. Try using bzImage or modules.");
die("System is too big. Try using %smodules.",
is_big_kernel ? "" : "bzImage or ");
while (sz > 0) { while (sz > 0) {
int l, n; int l, n;
......
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.17-rc1-git11 # Linux kernel version: 2.6.17-git6
# Sun Apr 16 07:22:36 2006 # Sat Jun 24 00:52:28 2006
# #
CONFIG_X86_64=y CONFIG_X86_64=y
CONFIG_64BIT=y CONFIG_64BIT=y
...@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y ...@@ -42,7 +42,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_RELAY is not set # CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE="" CONFIG_INITRAMFS_SOURCE=""
CONFIG_UID16=y CONFIG_UID16=y
CONFIG_VM86=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set # CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y CONFIG_KALLSYMS=y
...@@ -57,7 +56,6 @@ CONFIG_FUTEX=y ...@@ -57,7 +56,6 @@ CONFIG_FUTEX=y
CONFIG_EPOLL=y CONFIG_EPOLL=y
CONFIG_SHMEM=y CONFIG_SHMEM=y
CONFIG_SLAB=y CONFIG_SLAB=y
CONFIG_DOUBLEFAULT=y
# CONFIG_TINY_SHMEM is not set # CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0 CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set # CONFIG_SLOB is not set
...@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32 ...@@ -144,7 +142,8 @@ CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y CONFIG_HPET_TIMER=y
CONFIG_HPET_EMULATE_RTC=y CONFIG_HPET_EMULATE_RTC=y
CONFIG_GART_IOMMU=y CONFIG_IOMMU=y
# CONFIG_CALGARY_IOMMU is not set
CONFIG_SWIOTLB=y CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y CONFIG_X86_MCE_INTEL=y
...@@ -158,6 +157,7 @@ CONFIG_HZ_250=y ...@@ -158,6 +157,7 @@ CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set # CONFIG_HZ_1000 is not set
CONFIG_HZ=250 CONFIG_HZ=250
# CONFIG_REORDER is not set # CONFIG_REORDER is not set
CONFIG_K8_NB=y
CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_ISA_DMA_API=y CONFIG_ISA_DMA_API=y
...@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y ...@@ -293,6 +293,8 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_IPCOMP is not set # CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set # CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set # CONFIG_INET_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INET_DIAG=y CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set # CONFIG_TCP_CONG_ADVANCED is not set
...@@ -305,7 +307,10 @@ CONFIG_IPV6=y ...@@ -305,7 +307,10 @@ CONFIG_IPV6=y
# CONFIG_INET6_IPCOMP is not set # CONFIG_INET6_IPCOMP is not set
# CONFIG_INET6_XFRM_TUNNEL is not set # CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set # CONFIG_INET6_TUNNEL is not set
# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
# CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_TUNNEL is not set
# CONFIG_NETWORK_SECMARK is not set
# CONFIG_NETFILTER is not set # CONFIG_NETFILTER is not set
# #
...@@ -344,6 +349,7 @@ CONFIG_IPV6=y ...@@ -344,6 +349,7 @@ CONFIG_IPV6=y
# Network testing # Network testing
# #
# CONFIG_NET_PKTGEN is not set # CONFIG_NET_PKTGEN is not set
# CONFIG_NET_TCPPROBE is not set
# CONFIG_HAMRADIO is not set # CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set # CONFIG_IRDA is not set
# CONFIG_BT is not set # CONFIG_BT is not set
...@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y ...@@ -360,6 +366,7 @@ CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DRIVER is not set
# CONFIG_SYS_HYPERVISOR is not set
# #
# Connector - unified userspace <-> kernelspace linker # Connector - unified userspace <-> kernelspace linker
...@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y ...@@ -526,6 +533,7 @@ CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set # CONFIG_SCSI_SATA_MV is not set
CONFIG_SCSI_SATA_NV=y CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_PDC_ADMA is not set # CONFIG_SCSI_PDC_ADMA is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_SATA_QSTOR is not set # CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set # CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set # CONFIG_SCSI_SATA_SX4 is not set
...@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y ...@@ -591,10 +599,7 @@ CONFIG_IEEE1394=y
# #
# Device Drivers # Device Drivers
# #
# CONFIG_IEEE1394_PCILYNX is not set
#
# Texas Instruments PCILynx requires I2C
#
CONFIG_IEEE1394_OHCI1394=y CONFIG_IEEE1394_OHCI1394=y
# #
...@@ -645,7 +650,16 @@ CONFIG_VORTEX=y ...@@ -645,7 +650,16 @@ CONFIG_VORTEX=y
# #
# Tulip family network device support # Tulip family network device support
# #
# CONFIG_NET_TULIP is not set CONFIG_NET_TULIP=y
# CONFIG_DE2104X is not set
CONFIG_TULIP=y
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
# CONFIG_TULIP_NAPI is not set
# CONFIG_DE4X5 is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_DM9102 is not set
# CONFIG_ULI526X is not set
# CONFIG_HP100 is not set # CONFIG_HP100 is not set
CONFIG_NET_PCI=y CONFIG_NET_PCI=y
# CONFIG_PCNET32 is not set # CONFIG_PCNET32 is not set
...@@ -697,6 +711,7 @@ CONFIG_TIGON3=y ...@@ -697,6 +711,7 @@ CONFIG_TIGON3=y
# CONFIG_IXGB is not set # CONFIG_IXGB is not set
CONFIG_S2IO=m CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set # CONFIG_S2IO_NAPI is not set
# CONFIG_MYRI10GE is not set
# #
# Token Ring devices # Token Ring devices
...@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y ...@@ -887,7 +902,56 @@ CONFIG_HPET_MMAP=y
# #
# I2C support # I2C support
# #
# CONFIG_I2C is not set CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
#
# I2C Algorithms
#
# CONFIG_I2C_ALGOBIT is not set
# CONFIG_I2C_ALGOPCF is not set
# CONFIG_I2C_ALGOPCA is not set
#
# I2C Hardware Bus support
#
# CONFIG_I2C_ALI1535 is not set
# CONFIG_I2C_ALI1563 is not set
# CONFIG_I2C_ALI15X3 is not set
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
# CONFIG_I2C_I801 is not set
# CONFIG_I2C_I810 is not set
# CONFIG_I2C_PIIX4 is not set
CONFIG_I2C_ISA=m
# CONFIG_I2C_NFORCE2 is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_PROSAVAGE is not set
# CONFIG_I2C_SAVAGE4 is not set
# CONFIG_I2C_SIS5595 is not set
# CONFIG_I2C_SIS630 is not set
# CONFIG_I2C_SIS96X is not set
# CONFIG_I2C_STUB is not set
# CONFIG_I2C_VIA is not set
# CONFIG_I2C_VIAPRO is not set
# CONFIG_I2C_VOODOO3 is not set
# CONFIG_I2C_PCA_ISA is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_DS1374 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# #
# SPI support # SPI support
...@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y ...@@ -898,14 +962,51 @@ CONFIG_HPET_MMAP=y
# #
# Dallas's 1-wire bus # Dallas's 1-wire bus
# #
# CONFIG_W1 is not set
# #
# Hardware Monitoring support # Hardware Monitoring support
# #
CONFIG_HWMON=y CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set # CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_ABITUGURU is not set
# CONFIG_SENSORS_ADM1021 is not set
# CONFIG_SENSORS_ADM1025 is not set
# CONFIG_SENSORS_ADM1026 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71805F is not set
# CONFIG_SENSORS_FSCHER is not set
# CONFIG_SENSORS_FSCPOS is not set
# CONFIG_SENSORS_GL518SM is not set
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
# CONFIG_SENSORS_LM80 is not set
# CONFIG_SENSORS_LM83 is not set
# CONFIG_SENSORS_LM85 is not set
# CONFIG_SENSORS_LM87 is not set
# CONFIG_SENSORS_LM90 is not set
# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_SIS5595 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_SMSC47M192 is not set
CONFIG_SENSORS_SMSC47B397=m
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SENSORS_W83781D is not set
# CONFIG_SENSORS_W83791D is not set
# CONFIG_SENSORS_W83792D is not set
# CONFIG_SENSORS_W83L785TS is not set
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_HDAPS is not set # CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set # CONFIG_HWMON_DEBUG_CHIP is not set
...@@ -918,6 +1019,7 @@ CONFIG_HWMON=y ...@@ -918,6 +1019,7 @@ CONFIG_HWMON=y
# Multimedia devices # Multimedia devices
# #
# CONFIG_VIDEO_DEV is not set # CONFIG_VIDEO_DEV is not set
CONFIG_VIDEO_V4L2=y
# #
# Digital Video Broadcasting Devices # Digital Video Broadcasting Devices
...@@ -953,28 +1055,17 @@ CONFIG_SOUND=y ...@@ -953,28 +1055,17 @@ CONFIG_SOUND=y
# Open Sound System # Open Sound System
# #
CONFIG_SOUND_PRIME=y CONFIG_SOUND_PRIME=y
CONFIG_OBSOLETE_OSS_DRIVER=y
# CONFIG_SOUND_BT878 is not set # CONFIG_SOUND_BT878 is not set
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set # CONFIG_SOUND_EMU10K1 is not set
# CONFIG_SOUND_FUSION is not set # CONFIG_SOUND_FUSION is not set
# CONFIG_SOUND_CS4281 is not set
# CONFIG_SOUND_ES1370 is not set
# CONFIG_SOUND_ES1371 is not set # CONFIG_SOUND_ES1371 is not set
# CONFIG_SOUND_ESSSOLO1 is not set
# CONFIG_SOUND_MAESTRO is not set
# CONFIG_SOUND_MAESTRO3 is not set
CONFIG_SOUND_ICH=y CONFIG_SOUND_ICH=y
# CONFIG_SOUND_SONICVIBES is not set
# CONFIG_SOUND_TRIDENT is not set # CONFIG_SOUND_TRIDENT is not set
# CONFIG_SOUND_MSNDCLAS is not set # CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set # CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_VIA82CXXX is not set # CONFIG_SOUND_VIA82CXXX is not set
# CONFIG_SOUND_OSS is not set # CONFIG_SOUND_OSS is not set
# CONFIG_SOUND_ALI5455 is not set # CONFIG_SOUND_TVMIXER is not set
# CONFIG_SOUND_FORTE is not set
# CONFIG_SOUND_RME96XX is not set
# CONFIG_SOUND_AD1980 is not set
# #
# USB support # USB support
...@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y ...@@ -1000,6 +1091,7 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_SPLIT_ISO is not set # CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
# CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN is not set # CONFIG_USB_OHCI_BIG_ENDIAN is not set
...@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y ...@@ -1089,10 +1181,12 @@ CONFIG_USB_MON=y
# CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set # CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set # CONFIG_USB_LED is not set
# CONFIG_USB_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set # CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set # CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set # CONFIG_USB_PHIDGETSERVO is not set
# CONFIG_USB_IDMOUSE is not set # CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_APPLEDISPLAY is not set
# CONFIG_USB_SISUSBVGA is not set # CONFIG_USB_SISUSBVGA is not set
# CONFIG_USB_LD is not set # CONFIG_USB_LD is not set
# CONFIG_USB_TEST is not set # CONFIG_USB_TEST is not set
...@@ -1140,6 +1234,19 @@ CONFIG_USB_MON=y ...@@ -1140,6 +1234,19 @@ CONFIG_USB_MON=y
# #
# CONFIG_RTC_CLASS is not set # CONFIG_RTC_CLASS is not set
#
# DMA Engine support
#
# CONFIG_DMA_ENGINE is not set
#
# DMA Clients
#
#
# DMA Devices
#
# #
# Firmware Drivers # Firmware Drivers
# #
...@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y ...@@ -1175,6 +1282,7 @@ CONFIG_FS_POSIX_ACL=y
# CONFIG_MINIX_FS is not set # CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set # CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set # CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y CONFIG_AUTOFS_FS=y
...@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y ...@@ -1331,7 +1439,8 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_FS=y CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set # CONFIG_FRAME_POINTER is not set
# CONFIG_UNWIND_INFO is not set CONFIG_UNWIND_INFO=y
CONFIG_STACK_UNWIND=y
# CONFIG_FORCED_INLINING is not set # CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_RODATA is not set # CONFIG_DEBUG_RODATA is not set
......
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
* Copyright 2002 Andi Kleen, SuSE Labs. * Copyright 2002 Andi Kleen, SuSE Labs.
* FXSAVE<->i387 conversion support. Based on code by Gareth Hughes. * FXSAVE<->i387 conversion support. Based on code by Gareth Hughes.
* This is used for ptrace, signals and coredumps in 32bit emulation. * This is used for ptrace, signals and coredumps in 32bit emulation.
* $Id: fpu32.c,v 1.1 2002/03/21 14:16:32 ak Exp $
*/ */
#include <linux/sched.h> #include <linux/sched.h>
......
...@@ -6,8 +6,6 @@ ...@@ -6,8 +6,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen * 2000-12-* x86-64 compatibility mode signal handling by Andi Kleen
*
* $Id: ia32_signal.c,v 1.22 2002/07/29 10:34:03 ak Exp $
*/ */
#include <linux/sched.h> #include <linux/sched.h>
......
...@@ -155,6 +155,7 @@ sysenter_tracesys: ...@@ -155,6 +155,7 @@ sysenter_tracesys:
.previous .previous
jmp sysenter_do_call jmp sysenter_do_call
CFI_ENDPROC CFI_ENDPROC
ENDPROC(ia32_sysenter_target)
/* /*
* 32bit SYSCALL instruction entry. * 32bit SYSCALL instruction entry.
...@@ -178,7 +179,7 @@ sysenter_tracesys: ...@@ -178,7 +179,7 @@ sysenter_tracesys:
*/ */
ENTRY(ia32_cstar_target) ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple CFI_STARTPROC32 simple
CFI_DEF_CFA rsp,0 CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/ /*CFI_REGISTER rflags,r11*/
swapgs swapgs
...@@ -249,6 +250,7 @@ cstar_tracesys: ...@@ -249,6 +250,7 @@ cstar_tracesys:
.quad 1b,ia32_badarg .quad 1b,ia32_badarg
.previous .previous
jmp cstar_do_call jmp cstar_do_call
END(ia32_cstar_target)
ia32_badarg: ia32_badarg:
movq $-EFAULT,%rax movq $-EFAULT,%rax
...@@ -314,16 +316,13 @@ ia32_tracesys: ...@@ -314,16 +316,13 @@ ia32_tracesys:
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST RESTORE_REST
jmp ia32_do_syscall jmp ia32_do_syscall
END(ia32_syscall)
ia32_badsys: ia32_badsys:
movq $0,ORIG_RAX-ARGOFFSET(%rsp) movq $0,ORIG_RAX-ARGOFFSET(%rsp)
movq $-ENOSYS,RAX-ARGOFFSET(%rsp) movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
ni_syscall:
movq %rax,%rdi
jmp sys32_ni_syscall
quiet_ni_syscall: quiet_ni_syscall:
movq $-ENOSYS,%rax movq $-ENOSYS,%rax
ret ret
...@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common) ...@@ -370,10 +369,10 @@ ENTRY(ia32_ptregs_common)
RESTORE_REST RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */ jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC CFI_ENDPROC
END(ia32_ptregs_common)
.section .rodata,"a" .section .rodata,"a"
.align 8 .align 8
.globl ia32_sys_call_table
ia32_sys_call_table: ia32_sys_call_table:
.quad sys_restart_syscall .quad sys_restart_syscall
.quad sys_exit .quad sys_exit
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
* *
* This allows to access 64bit processes too; but there is no way to see the extended * This allows to access 64bit processes too; but there is no way to see the extended
* register contents. * register contents.
*
* $Id: ptrace32.c,v 1.16 2003/03/14 16:06:35 ak Exp $
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -27,6 +25,7 @@ ...@@ -27,6 +25,7 @@
#include <asm/debugreg.h> #include <asm/debugreg.h>
#include <asm/i387.h> #include <asm/i387.h>
#include <asm/fpu32.h> #include <asm/fpu32.h>
#include <asm/ia32.h>
/* /*
* Determines which flags the user has access to [1 = access, 0 = no access]. * Determines which flags the user has access to [1 = access, 0 = no access].
...@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val) ...@@ -199,6 +198,24 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
#undef R32 #undef R32
static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
{
int ret;
compat_siginfo_t *si32 = (compat_siginfo_t *)compat_ptr(data);
siginfo_t *si = compat_alloc_user_space(sizeof(siginfo_t));
if (request == PTRACE_SETSIGINFO) {
ret = copy_siginfo_from_user32(si, si32);
if (ret)
return ret;
}
ret = sys_ptrace(request, pid, addr, (unsigned long)si);
if (ret)
return ret;
if (request == PTRACE_GETSIGINFO)
ret = copy_siginfo_to_user32(si32, si);
return ret;
}
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{ {
struct task_struct *child; struct task_struct *child;
...@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) ...@@ -208,9 +225,19 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
__u32 val; __u32 val;
switch (request) { switch (request) {
default: case PTRACE_TRACEME:
case PTRACE_ATTACH:
case PTRACE_KILL:
case PTRACE_CONT:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
case PTRACE_SYSCALL:
case PTRACE_SETOPTIONS:
return sys_ptrace(request, pid, addr, data); return sys_ptrace(request, pid, addr, data);
default:
return -EINVAL;
case PTRACE_PEEKTEXT: case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: case PTRACE_PEEKDATA:
case PTRACE_POKEDATA: case PTRACE_POKEDATA:
...@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) ...@@ -225,10 +252,11 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_GETFPXREGS: case PTRACE_GETFPXREGS:
case PTRACE_GETEVENTMSG: case PTRACE_GETEVENTMSG:
break; break;
}
if (request == PTRACE_TRACEME) case PTRACE_SETSIGINFO:
return ptrace_traceme(); case PTRACE_GETSIGINFO:
return ptrace32_siginfo(request, pid, addr, data);
}
child = ptrace_get_task_struct(pid); child = ptrace_get_task_struct(pid);
if (IS_ERR(child)) if (IS_ERR(child))
...@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data) ...@@ -349,8 +377,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break; break;
default: default:
ret = -EINVAL; BUG();
break;
} }
out: out:
......
...@@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options) ...@@ -508,19 +508,6 @@ sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
return compat_sys_wait4(pid, stat_addr, options, NULL); return compat_sys_wait4(pid, stat_addr, options, NULL);
} }
int sys32_ni_syscall(int call)
{
struct task_struct *me = current;
static char lastcomm[sizeof(me->comm)];
if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
printk(KERN_INFO "IA32 syscall %d from %s not implemented\n",
call, me->comm);
strncpy(lastcomm, me->comm, sizeof(lastcomm));
}
return -ENOSYS;
}
/* 32-bit timeval and related flotsam. */ /* 32-bit timeval and related flotsam. */
asmlinkage long asmlinkage long
...@@ -916,7 +903,7 @@ long sys32_vm86_warning(void) ...@@ -916,7 +903,7 @@ long sys32_vm86_warning(void)
struct task_struct *me = current; struct task_struct *me = current;
static char lastcomm[sizeof(me->comm)]; static char lastcomm[sizeof(me->comm)];
if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) { if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n", compat_printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
me->comm); me->comm);
strncpy(lastcomm, me->comm, sizeof(lastcomm)); strncpy(lastcomm, me->comm, sizeof(lastcomm));
} }
...@@ -929,13 +916,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high, ...@@ -929,13 +916,3 @@ long sys32_lookup_dcookie(u32 addr_low, u32 addr_high,
return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len); return sys_lookup_dcookie(((u64)addr_high << 32) | addr_low, buf, len);
} }
static int __init ia32_init (void)
{
printk("IA32 emulation $Id: sys_ia32.c,v 1.32 2002/03/24 13:02:28 ak Exp $\n");
return 0;
}
__initcall(ia32_init);
extern unsigned long ia32_sys_call_table[];
EXPORT_SYMBOL(ia32_sys_call_table);
...@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \ ...@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
x8664_ksyms.o i387.o syscall.o vsyscall.o \ x8664_ksyms.o i387.o syscall.o vsyscall.o \
setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
pci-dma.o pci-nommu.o pci-dma.o pci-nommu.o alternative.o
obj-$(CONFIG_X86_MCE) += mce.o obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
...@@ -28,11 +28,13 @@ obj-$(CONFIG_PM) += suspend.o ...@@ -28,11 +28,13 @@ obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
obj-$(CONFIG_X86_VSMP) += vsmp.o obj-$(CONFIG_X86_VSMP) += vsmp.o
obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULES) += module.o
...@@ -49,3 +51,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o ...@@ -49,3 +51,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
alternative-y += ../../i386/kernel/alternative.o
...@@ -8,7 +8,6 @@ ...@@ -8,7 +8,6 @@
* because only the bootmem allocator can allocate 32+MB. * because only the bootmem allocator can allocate 32+MB.
* *
* Copyright 2002 Andi Kleen, SuSE Labs. * Copyright 2002 Andi Kleen, SuSE Labs.
* $Id: aperture.c,v 1.7 2003/08/01 03:36:18 ak Exp $
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -24,6 +23,7 @@ ...@@ -24,6 +23,7 @@
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/pci-direct.h> #include <asm/pci-direct.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/k8.h>
int iommu_aperture; int iommu_aperture;
int iommu_aperture_disabled __initdata = 0; int iommu_aperture_disabled __initdata = 0;
...@@ -37,8 +37,6 @@ int fix_aperture __initdata = 1; ...@@ -37,8 +37,6 @@ int fix_aperture __initdata = 1;
/* This code runs before the PCI subsystem is initialized, so just /* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */ access the northbridge directly. */
#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
static u32 __init allocate_aperture(void) static u32 __init allocate_aperture(void)
{ {
pg_data_t *nd0 = NODE_DATA(0); pg_data_t *nd0 = NODE_DATA(0);
...@@ -68,20 +66,20 @@ static u32 __init allocate_aperture(void) ...@@ -68,20 +66,20 @@ static u32 __init allocate_aperture(void)
return (u32)__pa(p); return (u32)__pa(p);
} }
static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size) static int __init aperture_valid(u64 aper_base, u32 aper_size)
{ {
if (!aper_base) if (!aper_base)
return 0; return 0;
if (aper_size < 64*1024*1024) { if (aper_size < 64*1024*1024) {
printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20); printk("Aperture too small (%d MB)\n", aper_size>>20);
return 0; return 0;
} }
if (aper_base + aper_size >= 0xffffffff) { if (aper_base + aper_size >= 0xffffffff) {
printk("Aperture from %s beyond 4GB. Ignoring.\n",name); printk("Aperture beyond 4GB. Ignoring.\n");
return 0; return 0;
} }
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); printk("Aperture pointing to e820 RAM. Ignoring.\n");
return 0; return 0;
} }
return 1; return 1;
...@@ -140,7 +138,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) ...@@ -140,7 +138,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
aper, 32 << *order, apsizereg); aper, 32 << *order, apsizereg);
if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order)) if (!aperture_valid(aper, (32*1024*1024) << *order))
return 0; return 0;
return (u32)aper; return (u32)aper;
} }
...@@ -208,10 +206,10 @@ void __init iommu_hole_init(void) ...@@ -208,10 +206,10 @@ void __init iommu_hole_init(void)
fix = 0; fix = 0;
for (num = 24; num < 32; num++) { for (num = 24; num < 32; num++) {
char name[30]; if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) continue;
continue;
iommu_detected = 1;
iommu_aperture = 1; iommu_aperture = 1;
aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7; aper_order = (read_pci_config(0, num, 3, 0x90) >> 1) & 7;
...@@ -222,9 +220,7 @@ void __init iommu_hole_init(void) ...@@ -222,9 +220,7 @@ void __init iommu_hole_init(void)
printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
aper_base, aper_size>>20); aper_base, aper_size>>20);
sprintf(name, "northbridge cpu %d", num-24); if (!aperture_valid(aper_base, aper_size)) {
if (!aperture_valid(name, aper_base, aper_size)) {
fix = 1; fix = 1;
break; break;
} }
...@@ -273,7 +269,7 @@ void __init iommu_hole_init(void) ...@@ -273,7 +269,7 @@ void __init iommu_hole_init(void)
/* Fix up the north bridges */ /* Fix up the north bridges */
for (num = 24; num < 32; num++) { for (num = 24; num < 32; num++) {
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
continue; continue;
/* Don't enable translation yet. That is done later. /* Don't enable translation yet. That is done later.
......
...@@ -100,7 +100,7 @@ void clear_local_APIC(void) ...@@ -100,7 +100,7 @@ void clear_local_APIC(void)
maxlvt = get_maxlvt(); maxlvt = get_maxlvt();
/* /*
* Masking an LVT entry on a P6 can trigger a local APIC error * Masking an LVT entry can trigger a local APIC error
* if the vector is zero. Mask LVTERR first to prevent this. * if the vector is zero. Mask LVTERR first to prevent this.
*/ */
if (maxlvt >= 3) { if (maxlvt >= 3) {
...@@ -851,7 +851,18 @@ void disable_APIC_timer(void) ...@@ -851,7 +851,18 @@ void disable_APIC_timer(void)
unsigned long v; unsigned long v;
v = apic_read(APIC_LVTT); v = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, v | APIC_LVT_MASKED); /*
* When an illegal vector value (0-15) is written to an LVT
* entry and delivery mode is Fixed, the APIC may signal an
* illegal vector error, with out regard to whether the mask
* bit is set or whether an interrupt is actually seen on input.
*
* Boot sequence might call this function when the LVTT has
* '0' vector value. So make sure vector field is set to
* valid value.
*/
v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
apic_write(APIC_LVTT, v);
} }
} }
...@@ -909,15 +920,13 @@ int setup_profiling_timer(unsigned int multiplier) ...@@ -909,15 +920,13 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL; return -EINVAL;
} }
#ifdef CONFIG_X86_MCE_AMD void setup_APIC_extened_lvt(unsigned char lvt_off, unsigned char vector,
void setup_threshold_lvt(unsigned long lvt_off) unsigned char msg_type, unsigned char mask)
{ {
unsigned int v = 0; unsigned long reg = (lvt_off << 4) + K8_APIC_EXT_LVT_BASE;
unsigned long reg = (lvt_off << 4) + 0x500; unsigned int v = (mask << 16) | (msg_type << 8) | vector;
v |= THRESHOLD_APIC_VECTOR;
apic_write(reg, v); apic_write(reg, v);
} }
#endif /* CONFIG_X86_MCE_AMD */
#undef APIC_DIVISOR #undef APIC_DIVISOR
...@@ -983,7 +992,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) ...@@ -983,7 +992,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
} }
/* /*
* oem_force_hpet_timer -- force HPET mode for some boxes. * apic_is_clustered_box() -- Check if we can expect good TSC
* *
* Thus far, the major user of this is IBM's Summit2 series: * Thus far, the major user of this is IBM's Summit2 series:
* *
...@@ -991,7 +1000,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs) ...@@ -991,7 +1000,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* multi-chassis. Use available data to take a good guess. * multi-chassis. Use available data to take a good guess.
* If in doubt, go HPET. * If in doubt, go HPET.
*/ */
__cpuinit int oem_force_hpet_timer(void) __cpuinit int apic_is_clustered_box(void)
{ {
int i, clusters, zeros; int i, clusters, zeros;
unsigned id; unsigned id;
...@@ -1022,8 +1031,7 @@ __cpuinit int oem_force_hpet_timer(void) ...@@ -1022,8 +1031,7 @@ __cpuinit int oem_force_hpet_timer(void)
} }
/* /*
* If clusters > 2, then should be multi-chassis. Return 1 for HPET. * If clusters > 2, then should be multi-chassis.
* Else return 0 to use TSC.
* May have to revisit this when multi-core + hyperthreaded CPUs come * May have to revisit this when multi-core + hyperthreaded CPUs come
* out, but AFAIK this will work even for them. * out, but AFAIK this will work even for them.
*/ */
......
...@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu) ...@@ -111,14 +111,14 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
atomic_dec(&waiting_for_crash_ipi); atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */ /* Assume hlt works */
for(;;) for(;;)
asm("hlt"); halt();
return 1; return 1;
} }
static void smp_send_nmi_allbutself(void) static void smp_send_nmi_allbutself(void)
{ {
send_IPI_allbutself(APIC_DM_NMI); send_IPI_allbutself(NMI_VECTOR);
} }
/* /*
......
/* /*
* Handle the memory map. * Handle the memory map.
* The functions here do the job until bootmem takes over. * The functions here do the job until bootmem takes over.
* $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $
* *
* Getting sanitize_e820_map() in sync with i386 version by applying change: * Getting sanitize_e820_map() in sync with i386 version by applying change:
* - Provisions for empty E820 memory regions (reported by certain BIOSes). * - Provisions for empty E820 memory regions (reported by certain BIOSes).
...@@ -621,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from) ...@@ -621,6 +620,7 @@ void __init parse_memmapopt(char *p, char **from)
} }
unsigned long pci_mem_start = 0xaeedbabe; unsigned long pci_mem_start = 0xaeedbabe;
EXPORT_SYMBOL(pci_mem_start);
/* /*
* Search for the biggest gap in the low 32 bits of the e820 * Search for the biggest gap in the low 32 bits of the e820
......
...@@ -154,6 +154,7 @@ rff_trace: ...@@ -154,6 +154,7 @@ rff_trace:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
jmp rff_action jmp rff_action
CFI_ENDPROC CFI_ENDPROC
END(ret_from_fork)
/* /*
* System call entry. Upto 6 arguments in registers are supported. * System call entry. Upto 6 arguments in registers are supported.
...@@ -188,7 +189,7 @@ rff_trace: ...@@ -188,7 +189,7 @@ rff_trace:
ENTRY(system_call) ENTRY(system_call)
CFI_STARTPROC simple CFI_STARTPROC simple
CFI_DEF_CFA rsp,0 CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/ /*CFI_REGISTER rflags,r11*/
swapgs swapgs
...@@ -285,6 +286,7 @@ tracesys: ...@@ -285,6 +286,7 @@ tracesys:
/* Use IRET because user could have changed frame */ /* Use IRET because user could have changed frame */
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC CFI_ENDPROC
END(system_call)
/* /*
* Syscall return path ending with IRET. * Syscall return path ending with IRET.
...@@ -364,6 +366,7 @@ int_restore_rest: ...@@ -364,6 +366,7 @@ int_restore_rest:
cli cli
jmp int_with_check jmp int_with_check
CFI_ENDPROC CFI_ENDPROC
END(int_ret_from_sys_call)
/* /*
* Certain special system calls that need to save a complete full stack frame. * Certain special system calls that need to save a complete full stack frame.
...@@ -375,6 +378,7 @@ int_restore_rest: ...@@ -375,6 +378,7 @@ int_restore_rest:
leaq \func(%rip),%rax leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ptregscall_common jmp ptregscall_common
END(\label)
.endm .endm
CFI_STARTPROC CFI_STARTPROC
...@@ -404,6 +408,7 @@ ENTRY(ptregscall_common) ...@@ -404,6 +408,7 @@ ENTRY(ptregscall_common)
CFI_REL_OFFSET rip, 0 CFI_REL_OFFSET rip, 0
ret ret
CFI_ENDPROC CFI_ENDPROC
END(ptregscall_common)
ENTRY(stub_execve) ENTRY(stub_execve)
CFI_STARTPROC CFI_STARTPROC
...@@ -418,6 +423,7 @@ ENTRY(stub_execve) ...@@ -418,6 +423,7 @@ ENTRY(stub_execve)
RESTORE_REST RESTORE_REST
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC CFI_ENDPROC
END(stub_execve)
/* /*
* sigreturn is special because it needs to restore all registers on return. * sigreturn is special because it needs to restore all registers on return.
...@@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn) ...@@ -435,6 +441,7 @@ ENTRY(stub_rt_sigreturn)
RESTORE_REST RESTORE_REST
jmp int_ret_from_sys_call jmp int_ret_from_sys_call
CFI_ENDPROC CFI_ENDPROC
END(stub_rt_sigreturn)
/* /*
* initial frame state for interrupts and exceptions * initial frame state for interrupts and exceptions
...@@ -466,29 +473,18 @@ ENTRY(stub_rt_sigreturn) ...@@ -466,29 +473,18 @@ ENTRY(stub_rt_sigreturn)
/* 0(%rsp): interrupt number */ /* 0(%rsp): interrupt number */
.macro interrupt func .macro interrupt func
cld cld
#ifdef CONFIG_DEBUG_INFO
SAVE_ALL
movq %rsp,%rdi
/*
* Setup a stack frame pointer. This allows gdb to trace
* back to the original stack.
*/
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
#else
SAVE_ARGS SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
#endif pushq %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp, 0
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
testl $3,CS(%rdi) testl $3,CS(%rdi)
je 1f je 1f
swapgs swapgs
1: incl %gs:pda_irqcount # RED-PEN should check preempt count 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
movq %gs:pda_irqstackptr,%rax cmoveq %gs:pda_irqstackptr,%rsp
cmoveq %rax,%rsp /*todo This needs CFI annotation! */
pushq %rdi # save old stack
#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET 8
#endif
call \func call \func
.endm .endm
...@@ -497,17 +493,11 @@ ENTRY(common_interrupt) ...@@ -497,17 +493,11 @@ ENTRY(common_interrupt)
interrupt do_IRQ interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */ /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr: ret_from_intr:
popq %rdi
#ifndef CONFIG_DEBUG_INFO
CFI_ADJUST_CFA_OFFSET -8
#endif
cli cli
decl %gs:pda_irqcount decl %gs:pda_irqcount
#ifdef CONFIG_DEBUG_INFO leaveq
movq RBP(%rdi),%rbp
CFI_DEF_CFA_REGISTER rsp CFI_DEF_CFA_REGISTER rsp
#endif CFI_ADJUST_CFA_OFFSET -8
leaq ARGOFFSET(%rdi),%rsp /*todo This needs CFI annotation! */
exit_intr: exit_intr:
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp) testl $3,CS-ARGOFFSET(%rsp)
...@@ -589,7 +579,9 @@ retint_kernel: ...@@ -589,7 +579,9 @@ retint_kernel:
call preempt_schedule_irq call preempt_schedule_irq
jmp exit_intr jmp exit_intr
#endif #endif
CFI_ENDPROC CFI_ENDPROC
END(common_interrupt)
/* /*
* APIC interrupts. * APIC interrupts.
...@@ -605,17 +597,21 @@ retint_kernel: ...@@ -605,17 +597,21 @@ retint_kernel:
ENTRY(thermal_interrupt) ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)
ENTRY(threshold_interrupt) ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
ENTRY(reschedule_interrupt) ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)
.macro INVALIDATE_ENTRY num .macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num) ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm .endm
INVALIDATE_ENTRY 0 INVALIDATE_ENTRY 0
...@@ -629,17 +625,21 @@ ENTRY(invalidate_interrupt\num) ...@@ -629,17 +625,21 @@ ENTRY(invalidate_interrupt\num)
ENTRY(call_function_interrupt) ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif #endif
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
ENTRY(apic_timer_interrupt) ENTRY(apic_timer_interrupt)
apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)
ENTRY(error_interrupt) ENTRY(error_interrupt)
apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)
ENTRY(spurious_interrupt) ENTRY(spurious_interrupt)
apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
#endif #endif
/* /*
...@@ -777,6 +777,7 @@ error_kernelspace: ...@@ -777,6 +777,7 @@ error_kernelspace:
cmpq $gs_change,RIP(%rsp) cmpq $gs_change,RIP(%rsp)
je error_swapgs je error_swapgs
jmp error_sti jmp error_sti
END(error_entry)
/* Reload gs selector with exception handling */ /* Reload gs selector with exception handling */
/* edi: new selector */ /* edi: new selector */
...@@ -794,6 +795,7 @@ gs_change: ...@@ -794,6 +795,7 @@ gs_change:
CFI_ADJUST_CFA_OFFSET -8 CFI_ADJUST_CFA_OFFSET -8
ret ret
CFI_ENDPROC CFI_ENDPROC
ENDPROC(load_gs_index)
.section __ex_table,"a" .section __ex_table,"a"
.align 8 .align 8
...@@ -847,7 +849,7 @@ ENTRY(kernel_thread) ...@@ -847,7 +849,7 @@ ENTRY(kernel_thread)
UNFAKE_STACK_FRAME UNFAKE_STACK_FRAME
ret ret
CFI_ENDPROC CFI_ENDPROC
ENDPROC(kernel_thread)
child_rip: child_rip:
/* /*
...@@ -860,6 +862,7 @@ child_rip: ...@@ -860,6 +862,7 @@ child_rip:
# exit # exit
xorl %edi, %edi xorl %edi, %edi
call do_exit call do_exit
ENDPROC(child_rip)
/* /*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly. * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
...@@ -889,19 +892,24 @@ ENTRY(execve) ...@@ -889,19 +892,24 @@ ENTRY(execve)
UNFAKE_STACK_FRAME UNFAKE_STACK_FRAME
ret ret
CFI_ENDPROC CFI_ENDPROC
ENDPROC(execve)
KPROBE_ENTRY(page_fault) KPROBE_ENTRY(page_fault)
errorentry do_page_fault errorentry do_page_fault
END(page_fault)
.previous .text .previous .text
ENTRY(coprocessor_error) ENTRY(coprocessor_error)
zeroentry do_coprocessor_error zeroentry do_coprocessor_error
END(coprocessor_error)
ENTRY(simd_coprocessor_error) ENTRY(simd_coprocessor_error)
zeroentry do_simd_coprocessor_error zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)
ENTRY(device_not_available) ENTRY(device_not_available)
zeroentry math_state_restore zeroentry math_state_restore
END(device_not_available)
/* runs on exception stack */ /* runs on exception stack */
KPROBE_ENTRY(debug) KPROBE_ENTRY(debug)
...@@ -911,6 +919,7 @@ KPROBE_ENTRY(debug) ...@@ -911,6 +919,7 @@ KPROBE_ENTRY(debug)
paranoidentry do_debug, DEBUG_STACK paranoidentry do_debug, DEBUG_STACK
jmp paranoid_exit jmp paranoid_exit
CFI_ENDPROC CFI_ENDPROC
END(debug)
.previous .text .previous .text
/* runs on exception stack */ /* runs on exception stack */
...@@ -961,6 +970,7 @@ paranoid_schedule: ...@@ -961,6 +970,7 @@ paranoid_schedule:
cli cli
jmp paranoid_userspace jmp paranoid_userspace
CFI_ENDPROC CFI_ENDPROC
END(nmi)
.previous .text .previous .text
KPROBE_ENTRY(int3) KPROBE_ENTRY(int3)
...@@ -970,22 +980,28 @@ KPROBE_ENTRY(int3) ...@@ -970,22 +980,28 @@ KPROBE_ENTRY(int3)
paranoidentry do_int3, DEBUG_STACK paranoidentry do_int3, DEBUG_STACK
jmp paranoid_exit jmp paranoid_exit
CFI_ENDPROC CFI_ENDPROC
END(int3)
.previous .text .previous .text
ENTRY(overflow) ENTRY(overflow)
zeroentry do_overflow zeroentry do_overflow
END(overflow)
ENTRY(bounds) ENTRY(bounds)
zeroentry do_bounds zeroentry do_bounds
END(bounds)
ENTRY(invalid_op) ENTRY(invalid_op)
zeroentry do_invalid_op zeroentry do_invalid_op
END(invalid_op)
ENTRY(coprocessor_segment_overrun) ENTRY(coprocessor_segment_overrun)
zeroentry do_coprocessor_segment_overrun zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)
ENTRY(reserved) ENTRY(reserved)
zeroentry do_reserved zeroentry do_reserved
END(reserved)
/* runs on exception stack */ /* runs on exception stack */
ENTRY(double_fault) ENTRY(double_fault)
...@@ -993,12 +1009,15 @@ ENTRY(double_fault) ...@@ -993,12 +1009,15 @@ ENTRY(double_fault)
paranoidentry do_double_fault paranoidentry do_double_fault
jmp paranoid_exit jmp paranoid_exit
CFI_ENDPROC CFI_ENDPROC
END(double_fault)
ENTRY(invalid_TSS) ENTRY(invalid_TSS)
errorentry do_invalid_TSS errorentry do_invalid_TSS
END(invalid_TSS)
ENTRY(segment_not_present) ENTRY(segment_not_present)
errorentry do_segment_not_present errorentry do_segment_not_present
END(segment_not_present)
/* runs on exception stack */ /* runs on exception stack */
ENTRY(stack_segment) ENTRY(stack_segment)
...@@ -1006,19 +1025,24 @@ ENTRY(stack_segment) ...@@ -1006,19 +1025,24 @@ ENTRY(stack_segment)
paranoidentry do_stack_segment paranoidentry do_stack_segment
jmp paranoid_exit jmp paranoid_exit
CFI_ENDPROC CFI_ENDPROC
END(stack_segment)
KPROBE_ENTRY(general_protection) KPROBE_ENTRY(general_protection)
errorentry do_general_protection errorentry do_general_protection
END(general_protection)
.previous .text .previous .text
ENTRY(alignment_check) ENTRY(alignment_check)
errorentry do_alignment_check errorentry do_alignment_check
END(alignment_check)
ENTRY(divide_error) ENTRY(divide_error)
zeroentry do_divide_error zeroentry do_divide_error
END(divide_error)
ENTRY(spurious_interrupt_bug) ENTRY(spurious_interrupt_bug)
zeroentry do_spurious_interrupt_bug zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
/* runs on exception stack */ /* runs on exception stack */
...@@ -1029,6 +1053,7 @@ ENTRY(machine_check) ...@@ -1029,6 +1053,7 @@ ENTRY(machine_check)
paranoidentry do_machine_check paranoidentry do_machine_check
jmp paranoid_exit jmp paranoid_exit
CFI_ENDPROC CFI_ENDPROC
END(machine_check)
#endif #endif
ENTRY(call_softirq) ENTRY(call_softirq)
...@@ -1046,3 +1071,37 @@ ENTRY(call_softirq) ...@@ -1046,3 +1071,37 @@ ENTRY(call_softirq)
decl %gs:pda_irqcount decl %gs:pda_irqcount
ret ret
CFI_ENDPROC CFI_ENDPROC
ENDPROC(call_softirq)
#ifdef CONFIG_STACK_UNWIND
/*
 * Capture the current task's register state into the pt_regs-layout
 * area pointed to by %rdi, then tail-call the callback that arrived
 * in %rsi, passing it the original %rdx as its second argument.
 * Callee-saved registers are stored with their live values; the
 * caller-saved (scratch) registers are recorded as zero.
 * NOTE(review): register usage implies a C prototype of the form
 * arch_unwind_init_running(info, callback, arg) -- confirm against
 * the declaration in the unwind header.
 */
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movq	%r15, R15(%rdi)
	movq	%r14, R14(%rdi)
	xchgq	%rsi, %rdx		# %rdx = callback, %rsi = arg (2nd C arg)
	movq	%r13, R13(%rdi)
	movq	%r12, R12(%rdi)
	xorl	%eax, %eax		# zero source for all scratch slots
	movq	%rbp, RBP(%rdi)
	movq	%rbx, RBX(%rdi)
	movq	(%rsp), %rcx		# our return address = caller's RIP
	movq	%rax, R11(%rdi)
	movq	%rax, R10(%rdi)
	movq	%rax, R9(%rdi)
	movq	%rax, R8(%rdi)
	movq	%rax, RAX(%rdi)
	movq	%rax, RCX(%rdi)
	movq	%rax, RDX(%rdi)
	movq	%rax, RSI(%rdi)
	movq	%rax, RDI(%rdi)
	movq	%rax, ORIG_RAX(%rdi)
	movq	%rcx, RIP(%rdi)		# unwind starts at the call site
	leaq	8(%rsp), %rcx		# caller's %rsp (skip return address)
	movq	$__KERNEL_CS, CS(%rdi)
	movq	%rax, EFLAGS(%rdi)
	movq	%rcx, RSP(%rdi)
	movq	$__KERNEL_DS, SS(%rdi)
	jmpq	*%rdx			# tail-call the callback; it returns for us
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif
...@@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) ...@@ -78,22 +78,29 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
static void flat_send_IPI_allbutself(int vector) static void flat_send_IPI_allbutself(int vector)
{ {
#ifndef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
if (((num_online_cpus()) - 1) >= 1) int hotplug = 1;
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
#else #else
cpumask_t allbutme = cpu_online_map; int hotplug = 0;
#endif
if (hotplug || vector == NMI_VECTOR) {
cpumask_t allbutme = cpu_online_map;
cpu_clear(smp_processor_id(), allbutme); cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme)) if (!cpus_empty(allbutme))
flat_send_IPI_mask(allbutme, vector); flat_send_IPI_mask(allbutme, vector);
#endif } else if (num_online_cpus() > 1) {
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
}
} }
static void flat_send_IPI_all(int vector) static void flat_send_IPI_all(int vector)
{ {
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); if (vector == NMI_VECTOR)
flat_send_IPI_mask(cpu_online_map, vector);
else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
} }
static int flat_apic_id_registered(void) static int flat_apic_id_registered(void)
...@@ -108,10 +115,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) ...@@ -108,10 +115,7 @@ static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
static unsigned int phys_pkg_id(int index_msb) static unsigned int phys_pkg_id(int index_msb)
{ {
u32 ebx; return hard_smp_processor_id() >> index_msb;
ebx = cpuid_ebx(1);
return ((ebx >> 24) & 0xFF) >> index_msb;
} }
struct genapic apic_flat = { struct genapic apic_flat = {
......
...@@ -2,8 +2,6 @@ ...@@ -2,8 +2,6 @@
* linux/arch/x86_64/kernel/head64.c -- prepare to run common code * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
* *
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*
* $Id: head64.c,v 1.22 2001/07/06 14:28:20 ak Exp $
*/ */
#include <linux/init.h> #include <linux/init.h>
......
...@@ -44,11 +44,11 @@ ...@@ -44,11 +44,11 @@
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
BI(x,c) BI(x,d) BI(x,e) BI(x,f) BI(x,c) BI(x,d) BI(x,e) BI(x,f)
#define BUILD_14_IRQS(x) \ #define BUILD_15_IRQS(x) \
BI(x,0) BI(x,1) BI(x,2) BI(x,3) \ BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
BI(x,4) BI(x,5) BI(x,6) BI(x,7) \ BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
BI(x,c) BI(x,d) BI(x,c) BI(x,d) BI(x,e)
/* /*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
...@@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) ...@@ -73,13 +73,13 @@ BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
BUILD_14_IRQS(0xe) BUILD_15_IRQS(0xe)
#endif #endif
#endif #endif
#undef BUILD_16_IRQS #undef BUILD_16_IRQS
#undef BUILD_14_IRQS #undef BUILD_15_IRQS
#undef BI #undef BI
...@@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) ...@@ -92,11 +92,11 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
#define IRQLIST_14(x) \ #define IRQLIST_15(x) \
IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \ IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \ IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
IRQ(x,c), IRQ(x,d) IRQ(x,c), IRQ(x,d), IRQ(x,e)
void (*interrupt[NR_IRQS])(void) = { void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0x0), IRQLIST_16(0x0),
...@@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = { ...@@ -108,7 +108,7 @@ void (*interrupt[NR_IRQS])(void) = {
IRQLIST_16(0xc), IRQLIST_16(0xd) IRQLIST_16(0xc), IRQLIST_16(0xd)
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
, IRQLIST_14(0xe) , IRQLIST_15(0xe)
#endif #endif
#endif #endif
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <asm/mach_apic.h> #include <asm/mach_apic.h>
#include <asm/acpi.h> #include <asm/acpi.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/nmi.h>
#define __apicdebuginit __init #define __apicdebuginit __init
...@@ -56,6 +57,7 @@ int timer_over_8254 __initdata = 0; ...@@ -56,6 +57,7 @@ int timer_over_8254 __initdata = 0;
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
static DEFINE_SPINLOCK(ioapic_lock); static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
/* /*
* # of IRQ routing registers * # of IRQ routing registers
...@@ -317,7 +319,7 @@ void __init check_ioapic(void) ...@@ -317,7 +319,7 @@ void __init check_ioapic(void)
vendor &= 0xffff; vendor &= 0xffff;
switch (vendor) { switch (vendor) {
case PCI_VENDOR_ID_VIA: case PCI_VENDOR_ID_VIA:
#ifdef CONFIG_GART_IOMMU #ifdef CONFIG_IOMMU
if ((end_pfn > MAX_DMA32_PFN || if ((end_pfn > MAX_DMA32_PFN ||
force_iommu) && force_iommu) &&
!iommu_aperture_allowed) { !iommu_aperture_allowed) {
...@@ -834,10 +836,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; ...@@ -834,10 +836,17 @@ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
int assign_irq_vector(int irq) int assign_irq_vector(int irq)
{ {
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
unsigned long flags;
int vector;
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
spin_lock_irqsave(&vector_lock, flags);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
spin_unlock_irqrestore(&vector_lock, flags);
return IO_APIC_VECTOR(irq); return IO_APIC_VECTOR(irq);
}
next: next:
current_vector += 8; current_vector += 8;
if (current_vector == IA32_SYSCALL_VECTOR) if (current_vector == IA32_SYSCALL_VECTOR)
...@@ -849,11 +858,14 @@ int assign_irq_vector(int irq) ...@@ -849,11 +858,14 @@ int assign_irq_vector(int irq)
current_vector = FIRST_DEVICE_VECTOR + offset; current_vector = FIRST_DEVICE_VECTOR + offset;
} }
vector_irq[current_vector] = irq; vector = current_vector;
vector_irq[vector] = irq;
if (irq != AUTO_ASSIGN) if (irq != AUTO_ASSIGN)
IO_APIC_VECTOR(irq) = current_vector; IO_APIC_VECTOR(irq) = vector;
spin_unlock_irqrestore(&vector_lock, flags);
return current_vector; return vector;
} }
extern void (*interrupt[NR_IRQS])(void); extern void (*interrupt[NR_IRQS])(void);
...@@ -866,21 +878,14 @@ static struct hw_interrupt_type ioapic_edge_type; ...@@ -866,21 +878,14 @@ static struct hw_interrupt_type ioapic_edge_type;
static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger)
{ {
if (use_pci_vector() && !platform_legacy_irq(irq)) { unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL) if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
irq_desc[vector].handler = &ioapic_level_type; trigger == IOAPIC_LEVEL)
else irq_desc[idx].handler = &ioapic_level_type;
irq_desc[vector].handler = &ioapic_edge_type; else
set_intr_gate(vector, interrupt[vector]); irq_desc[idx].handler = &ioapic_edge_type;
} else { set_intr_gate(vector, interrupt[idx]);
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
trigger == IOAPIC_LEVEL)
irq_desc[irq].handler = &ioapic_level_type;
else
irq_desc[irq].handler = &ioapic_edge_type;
set_intr_gate(vector, interrupt[irq]);
}
} }
static void __init setup_IO_APIC_irqs(void) static void __init setup_IO_APIC_irqs(void)
......
...@@ -26,6 +26,30 @@ atomic_t irq_mis_count; ...@@ -26,6 +26,30 @@ atomic_t irq_mis_count;
#endif #endif
#endif #endif
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
* Probabilistic stack overflow check:
*
* Only check the stack in process context, because everything else
* runs on the big interrupt stacks. Checking reliably is too expensive,
* so we just check from interrupts.
*/
/*
 * Warn when the interrupted kernel stack pointer has come within
 * 128 bytes of the thread_info block that sits at the bottom of the
 * process stack (the stack grows down toward it).
 * Rate-limited to one report per 60*HZ jiffies; 'warned' starts at
 * -60*HZ so the very first overflow is reported immediately.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
	u64 curbase = (u64) current->thread_info;
	static unsigned long warned = -60*HZ;

	/* Only meaningful if rsp is inside this task's stack range. */
	if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
	    regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
	    time_after(jiffies, warned + 60*HZ)) {
		printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
			current->comm, curbase, regs->rsp);
		show_stack(NULL,NULL);
		warned = jiffies;
	}
}
#endif
/* /*
* Generic, controller-independent functions: * Generic, controller-independent functions:
*/ */
...@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -39,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (i == 0) { if (i == 0) {
seq_printf(p, " "); seq_printf(p, " ");
for_each_online_cpu(j) for_each_online_cpu(j)
seq_printf(p, "CPU%d ",j); seq_printf(p, "CPU%-8d",j);
seq_putc(p, '\n'); seq_putc(p, '\n');
} }
...@@ -96,7 +120,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) ...@@ -96,7 +120,9 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
exit_idle(); exit_idle();
irq_enter(); irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
stack_overflow_check(regs);
#endif
__do_IRQ(irq, regs); __do_IRQ(irq, regs);
irq_exit(); irq_exit();
......
/*
* Shared support code for AMD K8 northbridges and derivates.
* Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
*/
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/k8.h>
/* Number of K8 northbridges found by cache_k8_northbridges(). */
int num_k8_northbridges;
EXPORT_SYMBOL(num_k8_northbridges);

/* Cached contents of each northbridge's GART flush word (config reg 0x9c). */
static u32 *flush_words;

/* PCI IDs identifying AMD K8 northbridge devices. */
struct pci_device_id k8_nb_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
	{}
};
EXPORT_SYMBOL(k8_nb_ids);

/* NULL-terminated array of the cached northbridge devices. */
struct pci_dev **k8_northbridges;
EXPORT_SYMBOL(k8_northbridges);
/*
 * Advance the PCI scan from @dev to the next device matching one of
 * the K8 northbridge IDs; returns NULL once the scan is exhausted.
 */
static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
{
	for (;;) {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev || pci_match_id(&k8_nb_ids[0], dev))
			return dev;
	}
}
/*
 * Scan the PCI bus for K8 northbridges, cache pointers to them in
 * k8_northbridges (NULL-terminated) and save each one's GART flush
 * word (config register 0x9c) in flush_words.
 * Returns 0 on success (including when already cached), -ENOMEM on
 * allocation failure.
 */
int cache_k8_northbridges(void)
{
	int i;
	struct pci_dev *dev;

	/* Already populated by an earlier call. */
	if (num_k8_northbridges)
		return 0;

	num_k8_northbridges = 0;
	dev = NULL;
	while ((dev = next_k8_northbridge(dev)) != NULL)
		num_k8_northbridges++;

	/* +1 slot for the NULL terminator. */
	k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
				  GFP_KERNEL);
	if (!k8_northbridges)
		return -ENOMEM;

	flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		kfree(k8_northbridges);
		return -ENOMEM;
	}

	dev = NULL;
	i = 0;
	while ((dev = next_k8_northbridge(dev)) != NULL) {
		k8_northbridges[i] = dev;
		/*
		 * Read the flush word into the SAME slot this device was
		 * stored in.  The previous code incremented i before the
		 * read, leaving flush_words[0] uninitialized and writing
		 * one element past the end of the array on the last
		 * iteration.
		 */
		pci_read_config_dword(dev, 0x9c, &flush_words[i++]);
	}
	k8_northbridges[i] = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(cache_k8_northbridges);
/* Ignores subdevice/subvendor but as far as I can figure out
they're useless anyways */
/*
 * Early-boot check (before the PCI layer is usable) whether a raw
 * vendor/device dword identifies a K8 northbridge: low 16 bits are
 * the vendor ID, high 16 bits the device ID.  Returns 1 on a match.
 */
int __init early_is_k8_nb(u32 device)
{
	u32 vend = device & 0xffff;
	u32 dev_id = device >> 16;
	struct pci_device_id *id = k8_nb_ids;

	while (id->vendor) {
		if (id->vendor == vend && id->device == dev_id)
			return 1;
		id++;
	}
	return 0;
}
/*
 * Flush the GART TLB on every cached K8 northbridge: set bit 0 of
 * config register 0x9c (writing back the saved flush word) on each
 * one, then poll each until the hardware clears the bit again.
 * Serialized by a local spinlock with IRQs disabled.
 */
void k8_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	/* First pass: kick off the flush on every northbridge. */
	for (i = 0; i < num_k8_northbridges; i++) {
		pci_write_config_dword(k8_northbridges[i], 0x9c,
				       flush_words[i]|1);
		flushed++;
	}
	/* Second pass: wait for every northbridge to finish. */
	for (i = 0; i < num_k8_northbridges; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush*/
		for (;;) {
			pci_read_config_dword(k8_northbridges[i],
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(k8_flush_garts);
...@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = { ...@@ -562,7 +562,7 @@ static struct sysdev_class mce_sysclass = {
set_kset_name("machinecheck"), set_kset_name("machinecheck"),
}; };
static DEFINE_PER_CPU(struct sys_device, device_mce); DEFINE_PER_CPU(struct sys_device, device_mce);
/* Why are there no generic functions for this? */ /* Why are there no generic functions for this? */
#define ACCESSOR(name, var, start) \ #define ACCESSOR(name, var, start) \
......
This diff is collapsed.
...@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs, ...@@ -145,26 +145,38 @@ int apply_relocate(Elf_Shdr *sechdrs,
return -ENOSYS; return -ENOSYS;
} }
extern void apply_alternatives(void *start, void *end);
int module_finalize(const Elf_Ehdr *hdr, int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, const Elf_Shdr *sechdrs,
struct module *me) struct module *me)
{ {
const Elf_Shdr *s; const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* look for .altinstructions to patch */ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { if (!strcmp(".text", secstrings + s->sh_name))
void *seg; text = s;
if (strcmp(".altinstructions", secstrings + s->sh_name)) if (!strcmp(".altinstructions", secstrings + s->sh_name))
continue; alt = s;
seg = (void *)s->sh_addr; if (!strcmp(".smp_locks", secstrings + s->sh_name))
apply_alternatives(seg, seg + s->sh_size); locks= s;
} }
if (alt) {
/* patch .altinstructions */
void *aseg = (void *)alt->sh_addr;
apply_alternatives(aseg, aseg + alt->sh_size);
}
if (locks && text) {
void *lseg = (void *)locks->sh_addr;
void *tseg = (void *)text->sh_addr;
alternatives_smp_module_add(me, me->name,
lseg, lseg + locks->sh_size,
tseg, tseg + text->sh_size);
}
return 0; return 0;
} }
void module_arch_cleanup(struct module *mod) void module_arch_cleanup(struct module *mod)
{ {
alternatives_smp_module_del(mod);
} }
...@@ -15,11 +15,7 @@ ...@@ -15,11 +15,7 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sysdev.h> #include <linux/sysdev.h>
#include <linux/nmi.h> #include <linux/nmi.h>
...@@ -27,14 +23,11 @@ ...@@ -27,14 +23,11 @@
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/local.h>
#include <asm/mce.h> #include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>
/* /*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware: * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
...@@ -74,6 +67,9 @@ static unsigned int nmi_p4_cccr_val; ...@@ -74,6 +67,9 @@ static unsigned int nmi_p4_cccr_val;
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
#define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
#define MSR_P4_MISC_ENABLE 0x1A0 #define MSR_P4_MISC_ENABLE 0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7) #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12) #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
...@@ -105,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void) ...@@ -105,7 +101,10 @@ static __cpuinit inline int nmi_known_cpu(void)
case X86_VENDOR_AMD: case X86_VENDOR_AMD:
return boot_cpu_data.x86 == 15; return boot_cpu_data.x86 == 15;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
return boot_cpu_data.x86 == 15; if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
return 1;
else
return (boot_cpu_data.x86 == 15);
} }
return 0; return 0;
} }
...@@ -211,6 +210,8 @@ int __init setup_nmi_watchdog(char *str) ...@@ -211,6 +210,8 @@ int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog); __setup("nmi_watchdog=", setup_nmi_watchdog);
static void disable_intel_arch_watchdog(void);
static void disable_lapic_nmi_watchdog(void) static void disable_lapic_nmi_watchdog(void)
{ {
if (nmi_active <= 0) if (nmi_active <= 0)
...@@ -223,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void) ...@@ -223,6 +224,8 @@ static void disable_lapic_nmi_watchdog(void)
if (boot_cpu_data.x86 == 15) { if (boot_cpu_data.x86 == 15) {
wrmsr(MSR_P4_IQ_CCCR0, 0, 0); wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
wrmsr(MSR_P4_CRU_ESCR0, 0, 0); wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
disable_intel_arch_watchdog();
} }
break; break;
} }
...@@ -375,6 +378,53 @@ static void setup_k7_watchdog(void) ...@@ -375,6 +378,53 @@ static void setup_k7_watchdog(void)
wrmsr(MSR_K7_EVNTSEL0, evntsel, 0); wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
} }
/*
 * Disable the NMI watchdog on CPUs with Architectural PerfMon by
 * clearing EVENTSEL0, provided the Unhalted Core Cycles event exists.
 */
static void disable_intel_arch_watchdog(void)
{
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 * (was misstated as "ebp"; the value comes from cpuid_ebx(10))
	 */
	ebx = cpuid_ebx(10);
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}
/*
 * Program the NMI watchdog on CPUs with Architectural PerfMon:
 * counter 0 counts Unhalted Core Cycles with INT/OS/USR set, so it
 * overflows (and raises an NMI via LVTPC) roughly nmi_hz times per
 * second.  Returns 1 on success, 0 if the required event is absent.
 */
static int setup_intel_arch_watchdog(void)
{
	unsigned int evntsel;
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 * (was misstated as "ebp"; the value comes from cpuid_ebx(10))
	 */
	ebx = cpuid_ebx(10);
	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;

	/* Start from a clean state: clear both event selects and counters. */
	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	/* Negative initial count: overflows after ~cpu_khz*1000/nmi_hz cycles. */
	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* Enable only after everything else is configured. */
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	return 1;
}
static int setup_p4_watchdog(void) static int setup_p4_watchdog(void)
{ {
...@@ -428,10 +478,16 @@ void setup_apic_nmi_watchdog(void) ...@@ -428,10 +478,16 @@ void setup_apic_nmi_watchdog(void)
setup_k7_watchdog(); setup_k7_watchdog();
break; break;
case X86_VENDOR_INTEL: case X86_VENDOR_INTEL:
if (boot_cpu_data.x86 != 15) if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
return; if (!setup_intel_arch_watchdog())
if (!setup_p4_watchdog()) return;
} else if (boot_cpu_data.x86 == 15) {
if (!setup_p4_watchdog())
return;
} else {
return; return;
}
break; break;
default: default:
...@@ -516,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason) ...@@ -516,7 +572,14 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
*/ */
wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0); wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
apic_write(APIC_LVTPC, APIC_DM_NMI); apic_write(APIC_LVTPC, APIC_DM_NMI);
} } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
/*
* For Intel based architectural perfmon
* - LVTPC is masked on interrupt and must be
* unmasked by the LVTPC handler.
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz)); wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
} }
} }
......
This diff is collapsed.
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/calgary.h>
int iommu_merge __read_mostly = 0; int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge); EXPORT_SYMBOL(iommu_merge);
...@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0; ...@@ -33,12 +34,15 @@ int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly= 0; int force_iommu __read_mostly= 0;
#endif #endif
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/* Dummy device used for NULL arguments (normally ISA). Better would /* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */ to i386. */
struct device fallback_dev = { struct device fallback_dev = {
.bus_id = "fallback device", .bus_id = "fallback device",
.coherent_dma_mask = 0xffffffff, .coherent_dma_mask = DMA_32BIT_MASK,
.dma_mask = &fallback_dev.coherent_dma_mask, .dma_mask = &fallback_dev.coherent_dma_mask,
}; };
...@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -77,7 +81,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
dev = &fallback_dev; dev = &fallback_dev;
dma_mask = dev->coherent_dma_mask; dma_mask = dev->coherent_dma_mask;
if (dma_mask == 0) if (dma_mask == 0)
dma_mask = 0xffffffff; dma_mask = DMA_32BIT_MASK;
/* Don't invoke OOM killer */ /* Don't invoke OOM killer */
gfp |= __GFP_NORETRY; gfp |= __GFP_NORETRY;
...@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -90,7 +94,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
larger than 16MB and in this case we have a chance of larger than 16MB and in this case we have a chance of
finding fitting memory in the next higher zone first. If finding fitting memory in the next higher zone first. If
not retry with true GFP_DMA. -AK */ not retry with true GFP_DMA. -AK */
if (dma_mask <= 0xffffffff) if (dma_mask <= DMA_32BIT_MASK)
gfp |= GFP_DMA32; gfp |= GFP_DMA32;
again: again:
...@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, ...@@ -111,7 +115,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
/* Don't use the 16MB ZONE_DMA unless absolutely /* Don't use the 16MB ZONE_DMA unless absolutely
needed. It's better to use remapping first. */ needed. It's better to use remapping first. */
if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) { if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
gfp = (gfp & ~GFP_DMA32) | GFP_DMA; gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again; goto again;
} }
...@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -174,7 +178,7 @@ int dma_supported(struct device *dev, u64 mask)
/* Copied from i386. Doesn't make much sense, because it will /* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent. only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */ The caller just has to use GFP_DMA in this case. */
if (mask < 0x00ffffff) if (mask < DMA_24BIT_MASK)
return 0; return 0;
/* Tell the device to use SAC when IOMMU force is on. This /* Tell the device to use SAC when IOMMU force is on. This
...@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask) ...@@ -189,7 +193,7 @@ int dma_supported(struct device *dev, u64 mask)
SAC for these. Assume all masks <= 40 bits are of this SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */ more gentle handling of IOMMU overflow. */
if (iommu_sac_force && (mask >= 0xffffffffffULL)) { if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask); printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
return 0; return 0;
} }
...@@ -266,7 +270,7 @@ __init int iommu_setup(char *p) ...@@ -266,7 +270,7 @@ __init int iommu_setup(char *p)
swiotlb = 1; swiotlb = 1;
#endif #endif
#ifdef CONFIG_GART_IOMMU #ifdef CONFIG_IOMMU
gart_parse_options(p); gart_parse_options(p);
#endif #endif
...@@ -276,3 +280,40 @@ __init int iommu_setup(char *p) ...@@ -276,3 +280,40 @@ __init int iommu_setup(char *p)
} }
return 1; return 1;
} }
__setup("iommu=", iommu_setup);
void __init pci_iommu_alloc(void)
{
/*
* The order of these functions is important for
* fall-back/fail-over reasons
*/
#ifdef CONFIG_IOMMU
iommu_hole_init();
#endif
#ifdef CONFIG_CALGARY_IOMMU
detect_calgary();
#endif
#ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
#endif
}
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
calgary_iommu_init();
#endif
#ifdef CONFIG_IOMMU
gart_iommu_init();
#endif
no_iommu_init();
return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/swiotlb.h> #include <asm/swiotlb.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/k8.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */ unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */ static unsigned long iommu_size; /* size of remapping area bytes */
...@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */ ...@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */
also seen with Qlogic at least). */ also seen with Qlogic at least). */
int iommu_fullflush = 1; int iommu_fullflush = 1;
#define MAX_NB 8
/* Allocation bitmap for the remapping area */ /* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock); static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
...@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry; ...@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry;
#define to_pages(addr,size) \ #define to_pages(addr,size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
#define for_all_nb(dev) \
dev = NULL; \
while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
#define EMERGENCY_PAGES 32 /* = 128KB */ #define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP #ifdef CONFIG_AGP
...@@ -93,7 +85,7 @@ static unsigned long alloc_iommu(int size) ...@@ -93,7 +85,7 @@ static unsigned long alloc_iommu(int size)
offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size); offset = find_next_zero_string(iommu_gart_bitmap,next_bit,iommu_pages,size);
if (offset == -1) { if (offset == -1) {
need_flush = 1; need_flush = 1;
offset = find_next_zero_string(iommu_gart_bitmap,0,next_bit,size); offset = find_next_zero_string(iommu_gart_bitmap,0,iommu_pages,size);
} }
if (offset != -1) { if (offset != -1) {
set_bit_string(iommu_gart_bitmap, offset, size); set_bit_string(iommu_gart_bitmap, offset, size);
...@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size) ...@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size)
/* /*
* Use global flush state to avoid races with multiple flushers. * Use global flush state to avoid races with multiple flushers.
*/ */
static void flush_gart(struct device *dev) static void flush_gart(void)
{ {
unsigned long flags; unsigned long flags;
int flushed = 0;
int i, max;
spin_lock_irqsave(&iommu_bitmap_lock, flags); spin_lock_irqsave(&iommu_bitmap_lock, flags);
if (need_flush) { if (need_flush) {
max = 0; k8_flush_garts();
for (i = 0; i < MAX_NB; i++) {
if (!northbridges[i])
continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
flushed++;
max = i;
}
for (i = 0; i <= max; i++) {
u32 w;
if (!northbridges[i])
continue;
/* Make sure the hardware actually executed the flush. */
for (;;) {
pci_read_config_dword(northbridges[i], 0x9c, &w);
if (!(w & 1))
break;
cpu_relax();
}
}
if (!flushed)
printk("nothing to flush?\n");
need_flush = 0; need_flush = 0;
} }
spin_unlock_irqrestore(&iommu_bitmap_lock, flags); spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
} }
#ifdef CONFIG_IOMMU_LEAK #ifdef CONFIG_IOMMU_LEAK
#define SET_LEAK(x) if (iommu_leak_tab) \ #define SET_LEAK(x) if (iommu_leak_tab) \
...@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf, ...@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
size_t size, int dir) size_t size, int dir)
{ {
dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
flush_gart(dev); flush_gart();
return map; return map;
} }
...@@ -288,6 +253,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir) ...@@ -288,6 +253,28 @@ dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
return bus; return bus;
} }
/*
* Free a DMA mapping.
*/
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
unsigned long iommu_page;
int npages;
int i;
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr >= iommu_bus_base + iommu_size)
return;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
}
/* /*
* Wrapper for pci_unmap_single working with scatterlists. * Wrapper for pci_unmap_single working with scatterlists.
*/ */
...@@ -299,7 +286,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di ...@@ -299,7 +286,7 @@ void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int di
struct scatterlist *s = &sg[i]; struct scatterlist *s = &sg[i];
if (!s->dma_length || !s->length) if (!s->dma_length || !s->length)
break; break;
dma_unmap_single(dev, s->dma_address, s->dma_length, dir); gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
} }
} }
...@@ -329,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, ...@@ -329,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
s->dma_address = addr; s->dma_address = addr;
s->dma_length = s->length; s->dma_length = s->length;
} }
flush_gart(dev); flush_gart();
return nents; return nents;
} }
...@@ -436,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) ...@@ -436,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
goto error; goto error;
out++; out++;
flush_gart(dev); flush_gart();
if (out < nents) if (out < nents)
sg[out].dma_length = 0; sg[out].dma_length = 0;
return out; return out;
error: error:
flush_gart(NULL); flush_gart();
gart_unmap_sg(dev, sg, nents, dir); gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced or merged try again in a dumb way */ /* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) { if (force_iommu || iommu_merge) {
...@@ -458,28 +445,6 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) ...@@ -458,28 +445,6 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
return 0; return 0;
} }
/*
* Free a DMA mapping.
*/
void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
unsigned long iommu_page;
int npages;
int i;
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr >= iommu_bus_base + iommu_size)
return;
iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
npages = to_pages(dma_addr, size);
for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
CLEAR_LEAK(iommu_page + i);
}
free_iommu(iommu_page, npages);
}
static int no_agp; static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
...@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
void *gatt; void *gatt;
unsigned aper_base, new_aper_base; unsigned aper_base, new_aper_base;
unsigned aper_size, gatt_size, new_aper_size; unsigned aper_size, gatt_size, new_aper_size;
int i;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0; aper_size = aper_base = info->aper_size = 0;
for_all_nb(dev) { dev = NULL;
for (i = 0; i < num_k8_northbridges; i++) {
dev = k8_northbridges[i];
new_aper_base = read_aperture(dev, &new_aper_size); new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base) if (!new_aper_base)
goto nommu; goto nommu;
...@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
panic("Cannot allocate GATT table"); panic("Cannot allocate GATT table");
memset(gatt, 0, gatt_size); memset(gatt, 0, gatt_size);
agp_gatt_table = gatt; agp_gatt_table = gatt;
for_all_nb(dev) { for (i = 0; i < num_k8_northbridges; i++) {
u32 ctl; u32 ctl;
u32 gatt_reg; u32 gatt_reg;
dev = k8_northbridges[i];
gatt_reg = __pa(gatt) >> 12; gatt_reg = __pa(gatt) >> 12;
gatt_reg <<= 4; gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg); pci_write_config_dword(dev, 0x98, gatt_reg);
...@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
pci_write_config_dword(dev, 0x90, ctl); pci_write_config_dword(dev, 0x90, ctl);
} }
flush_gart(NULL); flush_gart();
printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
return 0; return 0;
...@@ -602,15 +571,19 @@ static struct dma_mapping_ops gart_dma_ops = { ...@@ -602,15 +571,19 @@ static struct dma_mapping_ops gart_dma_ops = {
.unmap_sg = gart_unmap_sg, .unmap_sg = gart_unmap_sg,
}; };
static int __init pci_iommu_init(void) void __init gart_iommu_init(void)
{ {
struct agp_kern_info info; struct agp_kern_info info;
unsigned long aper_size; unsigned long aper_size;
unsigned long iommu_start; unsigned long iommu_start;
struct pci_dev *dev;
unsigned long scratch; unsigned long scratch;
long i; long i;
if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
return;
}
#ifndef CONFIG_AGP_AMD64 #ifndef CONFIG_AGP_AMD64
no_agp = 1; no_agp = 1;
#else #else
...@@ -622,7 +595,11 @@ static int __init pci_iommu_init(void) ...@@ -622,7 +595,11 @@ static int __init pci_iommu_init(void)
#endif #endif
if (swiotlb) if (swiotlb)
return -1; return;
/* Did we detect a different HW IOMMU? */
if (iommu_detected && !iommu_aperture)
return;
if (no_iommu || if (no_iommu ||
(!force_iommu && end_pfn <= MAX_DMA32_PFN) || (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
...@@ -634,15 +611,7 @@ static int __init pci_iommu_init(void) ...@@ -634,15 +611,7 @@ static int __init pci_iommu_init(void)
"but IOMMU not available.\n" "but IOMMU not available.\n"
KERN_ERR "WARNING 32bit PCI may malfunction.\n"); KERN_ERR "WARNING 32bit PCI may malfunction.\n");
} }
return -1; return;
}
i = 0;
for_all_nb(dev)
i++;
if (i > MAX_NB) {
printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
return -1;
} }
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
...@@ -707,26 +676,10 @@ static int __init pci_iommu_init(void) ...@@ -707,26 +676,10 @@ static int __init pci_iommu_init(void)
for (i = EMERGENCY_PAGES; i < iommu_pages; i++) for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry; iommu_gatt_base[i] = gart_unmapped_entry;
for_all_nb(dev) { flush_gart();
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (cpu >= MAX_NB)
continue;
northbridges[cpu] = dev;
pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
northbridge_flush_word[cpu] = flag;
}
flush_gart(NULL);
dma_ops = &gart_dma_ops; dma_ops = &gart_dma_ops;
return 0;
} }
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
void gart_parse_options(char *p) void gart_parse_options(char *p)
{ {
int arg; int arg;
......
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/dma.h> #include <asm/dma.h>
...@@ -12,10 +14,11 @@ static int ...@@ -12,10 +14,11 @@ static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{ {
if (hwdev && bus + size > *hwdev->dma_mask) { if (hwdev && bus + size > *hwdev->dma_mask) {
if (*hwdev->dma_mask >= 0xffffffffULL) if (*hwdev->dma_mask >= DMA_32BIT_MASK)
printk(KERN_ERR printk(KERN_ERR
"nommu_%s: overflow %Lx+%lu of device mask %Lx\n", "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
name, (long long)bus, size, (long long)*hwdev->dma_mask); name, (long long)bus, size,
(long long)*hwdev->dma_mask);
return 0; return 0;
} }
return 1; return 1;
......
...@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = { ...@@ -31,7 +31,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
void pci_swiotlb_init(void) void pci_swiotlb_init(void)
{ {
/* don't initialize swiotlb if iommu=off (no_iommu=1) */ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
if (!iommu_aperture && !no_iommu && if (!iommu_detected && !no_iommu &&
(end_pfn > MAX_DMA32_PFN || force_iommu)) (end_pfn > MAX_DMA32_PFN || force_iommu))
swiotlb = 1; swiotlb = 1;
if (swiotlb) { if (swiotlb) {
......
...@@ -10,7 +10,6 @@ ...@@ -10,7 +10,6 @@
* Andi Kleen. * Andi Kleen.
* *
* CPU hotplug support - ashok.raj@intel.com * CPU hotplug support - ashok.raj@intel.com
* $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
*/ */
/* /*
...@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override); ...@@ -64,6 +63,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
* Powermanagement idle function, if any.. * Powermanagement idle function, if any..
*/ */
void (*pm_idle)(void); void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state); static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
static ATOMIC_NOTIFIER_HEAD(idle_notifier); static ATOMIC_NOTIFIER_HEAD(idle_notifier);
...@@ -111,7 +111,7 @@ static void default_idle(void) ...@@ -111,7 +111,7 @@ static void default_idle(void)
{ {
local_irq_enable(); local_irq_enable();
clear_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status &= ~TS_POLLING;
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
while (!need_resched()) { while (!need_resched()) {
local_irq_disable(); local_irq_disable();
...@@ -120,7 +120,7 @@ static void default_idle(void) ...@@ -120,7 +120,7 @@ static void default_idle(void)
else else
local_irq_enable(); local_irq_enable();
} }
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
} }
/* /*
...@@ -203,8 +203,7 @@ static inline void play_dead(void) ...@@ -203,8 +203,7 @@ static inline void play_dead(void)
*/ */
void cpu_idle (void) void cpu_idle (void)
{ {
set_thread_flag(TIF_POLLING_NRFLAG); current_thread_info()->status |= TS_POLLING;
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
while (!need_resched()) { while (!need_resched()) {
...@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs) ...@@ -335,7 +334,7 @@ void show_regs(struct pt_regs *regs)
{ {
printk("CPU %d:", smp_processor_id()); printk("CPU %d:", smp_processor_id());
__show_regs(regs); __show_regs(regs);
show_trace(&regs->rsp); show_trace(NULL, regs, (void *)(regs + 1));
} }
/* /*
...@@ -365,8 +364,11 @@ void flush_thread(void) ...@@ -365,8 +364,11 @@ void flush_thread(void)
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct thread_info *t = current_thread_info(); struct thread_info *t = current_thread_info();
if (t->flags & _TIF_ABI_PENDING) if (t->flags & _TIF_ABI_PENDING) {
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32); t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
if (t->flags & _TIF_IA32)
current_thread_info()->status |= TS_COMPAT;
}
tsk->thread.debugreg0 = 0; tsk->thread.debugreg0 = 0;
tsk->thread.debugreg1 = 0; tsk->thread.debugreg1 = 0;
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
* Power off function, if any * Power off function, if any
*/ */
void (*pm_power_off)(void); void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static long no_idt[3]; static long no_idt[3];
static enum { static enum {
......
This diff is collapsed.
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
* Copyright (C) 1995 Linus Torvalds * Copyright (C) 1995 Linus Torvalds
* Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen. * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
* See setup.c for older changelog. * See setup.c for older changelog.
* $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -31,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,}; ...@@ -31,6 +30,7 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE; cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly; struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned; struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
...@@ -38,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; ...@@ -38,6 +38,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
unsigned long __supported_pte_mask __read_mostly = ~0UL; unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL(__supported_pte_mask);
static int do_not_nx __cpuinitdata = 0; static int do_not_nx __cpuinitdata = 0;
/* noexec=on|off /* noexec=on|off
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-2002 x86-64 support by Andi Kleen * 2000-2002 x86-64 support by Andi Kleen
*
* $Id: signal.c,v 1.18 2001/10/17 22:30:37 ak Exp $
*/ */
#include <linux/sched.h> #include <linux/sched.h>
...@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size) ...@@ -239,7 +237,6 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
rsp = regs->rsp - 128; rsp = regs->rsp - 128;
/* This is the X/Open sanctioned signal stack switching. */ /* This is the X/Open sanctioned signal stack switching. */
/* RED-PEN: redzone on that stack? */
if (ka->sa.sa_flags & SA_ONSTACK) { if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(rsp) == 0) if (sas_ss_flags(rsp) == 0)
rsp = current->sas_ss_sp + current->sas_ss_size; rsp = current->sas_ss_sp + current->sas_ss_size;
......
...@@ -224,6 +224,7 @@ void flush_tlb_current_task(void) ...@@ -224,6 +224,7 @@ void flush_tlb_current_task(void)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL); flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm) void flush_tlb_mm (struct mm_struct * mm)
{ {
...@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm) ...@@ -244,6 +245,7 @@ void flush_tlb_mm (struct mm_struct * mm)
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{ {
...@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va) ...@@ -266,6 +268,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info) static void do_flush_tlb_all(void* info)
{ {
...@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, ...@@ -443,6 +446,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_unlock(&call_lock); spin_unlock(&call_lock);
return 0; return 0;
} }
EXPORT_SYMBOL(smp_call_function);
void smp_stop_cpu(void) void smp_stop_cpu(void)
{ {
...@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy) ...@@ -460,7 +464,7 @@ static void smp_really_stop_cpu(void *dummy)
{ {
smp_stop_cpu(); smp_stop_cpu();
for (;;) for (;;)
asm("hlt"); halt();
} }
void smp_send_stop(void) void smp_send_stop(void)
...@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void) ...@@ -520,13 +524,13 @@ asmlinkage void smp_call_function_interrupt(void)
int safe_smp_processor_id(void) int safe_smp_processor_id(void)
{ {
int apicid, i; unsigned apicid, i;
if (disable_apic) if (disable_apic)
return 0; return 0;
apicid = hard_smp_processor_id(); apicid = hard_smp_processor_id();
if (x86_cpu_to_apicid[apicid] == apicid) if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
return apicid; return apicid;
for (i = 0; i < NR_CPUS; ++i) { for (i = 0; i < NR_CPUS; ++i) {
......
...@@ -63,13 +63,11 @@ ...@@ -63,13 +63,11 @@
/* Number of siblings per CPU package */ /* Number of siblings per CPU package */
int smp_num_siblings = 1; int smp_num_siblings = 1;
/* Package ID of each logical CPU */ EXPORT_SYMBOL(smp_num_siblings);
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Last level cache ID of each logical CPU */ /* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(cpu_llc_id);
/* Bitmask of currently online CPUs */ /* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly; cpumask_t cpu_online_map __read_mostly;
...@@ -82,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map); ...@@ -82,18 +80,21 @@ EXPORT_SYMBOL(cpu_online_map);
*/ */
cpumask_t cpu_callin_map; cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map; cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map; cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map); EXPORT_SYMBOL(cpu_possible_map);
/* Per CPU bogomips and other parameters */ /* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
/* Set when the idlers are all forked */ /* Set when the idlers are all forked */
int smp_threads_ready; int smp_threads_ready;
/* representing HT siblings of each logical CPU */ /* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */ /* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly; cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
...@@ -472,8 +473,8 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -472,8 +473,8 @@ static inline void set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) { if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) { for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] && if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
cpu_core_id[cpu] == cpu_core_id[i]) { c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]); cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]); cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]); cpu_set(i, cpu_core_map[cpu]);
...@@ -500,7 +501,7 @@ static inline void set_cpu_sibling_map(int cpu) ...@@ -500,7 +501,7 @@ static inline void set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map); cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map);
} }
if (phys_proc_id[cpu] == phys_proc_id[i]) { if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]); cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]); cpu_set(cpu, cpu_core_map[i]);
/* /*
...@@ -797,6 +798,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) ...@@ -797,6 +798,8 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
} }
alternatives_smp_switch(1);
c_idle.idle = get_idle_for_cpu(cpu); c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) { if (c_idle.idle) {
...@@ -1199,8 +1202,8 @@ static void remove_siblinginfo(int cpu) ...@@ -1199,8 +1202,8 @@ static void remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]); cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]); cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID; c[cpu].phys_proc_id = 0;
cpu_core_id[cpu] = BAD_APICID; c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map); cpu_clear(cpu, cpu_sibling_setup_map);
} }
...@@ -1259,6 +1262,8 @@ void __cpu_die(unsigned int cpu) ...@@ -1259,6 +1262,8 @@ void __cpu_die(unsigned int cpu)
/* They ack this in play_dead by setting CPU_DEAD */ /* They ack this in play_dead by setting CPU_DEAD */
if (per_cpu(cpu_state, cpu) == CPU_DEAD) { if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
printk ("CPU %d is now offline\n", cpu); printk ("CPU %d is now offline\n", cpu);
if (1 == num_online_cpus())
alternatives_smp_switch(0);
return; return;
} }
msleep(100); msleep(100);
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#include <asm-x86_64/k8.h>
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment