Commit 2ba74897 authored by Andi Kleen, committed by David S. Miller

[PATCH] x86-64 merge

Make it compile again, plus various cleanups and a few bug fixes.  Only
changes x86-64 specific files.

Most of it is S3 suspend changes from Pavel and comment spelling fixes
from Steven Cole.

- Remove now obsolete check_cpu function
- Fix sys_ioctl prototype
- Small optimization - use SYSCALL for 32bit signal handling.
- Fix S3 suspend handling and split into individual files like i386 (Pavel)
- Merge from i386 (pci fixes etc.)
- Set correct paging attributes for IOMMU aperture
- Fix disable apic option
parent e7507c16
@@ -327,89 +327,6 @@ void close_output_buffer_if_we_run_high(struct moveparams *mv)
        }
}
-void check_cpu(void)
-{
-       unsigned before, after, flags;
-       unsigned a,b,c,d;
-       int isamd;
-
-       /* Check if the CPU supports CPUID. This is done by testing if the CPU
-          supports changing the ID bit (21) in EFLAGS. */
-       asm("pushfl ; "
-           "popl %0 ; "                /* get EFLAGS */
-           "movl %0,%1 ; "
-           "xorl $(1<<21),%0 ; "       /* toggle bit 21 */
-           "pushl %0 ; "
-           "popfl ; "
-           "pushfl ; "                 /* get EFLAGS again */
-           "popl %0 " : "=r" (after), "=r" (before));
-       if (before == after)
-               error("Your CPU doesn't support CPUID.");
-
-       /* Check if it supports AMD extended cpuid reporting */
-       asm("cpuid" : "=a" (a), "=b" (b), "=c" (c), "=d" (d) : "0" (0x80000000));
-       if (a < 0x80000001)
-               error("Your CPU doesn't support AMD extended CPUIDs.");
-
-       /* "AuthenticAMD" */
-       isamd = (b == 0x68747541) && (d == 0x69746e65) && (c == 0x444d4163);
-
-       /* Check required feature flags.
-          See http://www.x86-64.org/lists/discuss/msg02971.html */
-#define REQUIRED_MASK1 ((1<<0)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<8)|(1<<11)| \
-                       (1<<13)|(1<<15)|(1<<24))
-       asm("cpuid" : "=d" (flags), "=a" (a) : "1" (0x80000001) : "ebx", "ecx");
-       flags &= REQUIRED_MASK1;
-       flags ^= REQUIRED_MASK1;
-       if (flags & (1<<9)) {
-               puts("WARNING: non APIC mode for long mode kernel is untested.");
-               puts("In case of trouble use 32bit kernel or enable APIC.");
-       }
-       if (flags & (1<<0))
-               error("CPU misses x87 FPU");
-       if (flags & (1<<3))
-               error("CPU doesn't support page size extension (PSE)");
-       if (flags & (1<<4))
-               error("CPU misses a time stamp counter");
-       if (flags & (1<<5))
-               error("CPU misses AMD style MSRs");
-       if (flags & (1<<6))
-               error("CPU misses physical address extension (PAE)");
-       if (flags & (1<<8))
-               error("CPU misses cmpxchg8");
-       if (flags & (1<<11))
-               error("CPU doesn't support SYSCALL/SYSRET");
-       if (flags & (1<<13))
-               error("CPU doesn't support PGE");
-       if (flags & (1<<15))
-               error("CPU doesn't support CMOV");
-       if (flags & (1<<24))
-               error("CPU doesn't support FXSAVE/FXRSTOR");
-       if (flags & (1<<29))
-               error("CPU doesn't support long mode");
-
-#define SSE_MASK ((1<<25)|(1<<26))
-       asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
-       if ((flags & SSE_MASK) != SSE_MASK && isamd) {
-               /* Only try this on AMD CPUs: enable SSE in the HWCFG MSR. */
-               asm volatile("rdmsr" : "=d" (d), "=a" (flags) : "c" (0xc0010015));
-               flags &= ~(1<<15);
-               asm volatile("wrmsr" :: "d" (d), "a" (flags), "c" (0xc0010015));
-       }
-
-       /* Try again */
-       asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
-       flags &= SSE_MASK;
-       flags ^= SSE_MASK;
-       if (flags & (1<<25))
-               error("CPU doesn't support SSE1");
-       if (flags & (1<<26))
-               error("CPU doesn't support SSE2");
-}
int decompress_kernel(struct moveparams *mv, void *rmode)
{
        real_mode = rmode;
...
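The EFLAGS probe in the deleted check_cpu() above is the classic test for
CPUID presence; every x86-64 CPU has CPUID, which is part of why the check
became obsolete. A minimal freestanding sketch of the idiom, assuming a
32-bit x86 target and GCC inline assembly -- have_cpuid() is an illustrative
name, not a kernel interface:

static int have_cpuid(void)
{
        unsigned int before, after;

        asm("pushfl\n\t"
            "popl %0\n\t"             /* before = EFLAGS */
            "movl %0, %1\n\t"
            "xorl $(1<<21), %1\n\t"   /* toggle the ID bit */
            "pushl %1\n\t"
            "popfl\n\t"
            "pushfl\n\t"
            "popl %1"                 /* after = EFLAGS as the CPU kept it */
            : "=r" (before), "=r" (after));

        return before != after;       /* writable ID bit => CPUID exists */
}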
/*
 * Written 2000,2002 by Andi Kleen.
 *
- * Losely based on the sparc64 and IA64 32bit emulation loaders.
+ * Loosely based on the sparc64 and IA64 32bit emulation loaders.
 * This tricks binfmt_elf.c into loading 32bit binaries using lots
 * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
 */
...
@@ -124,7 +124,7 @@
#define EXT2_IOC32_GETVERSION  _IOR('v', 1, int)
#define EXT2_IOC32_SETVERSION  _IOW('v', 2, int)

-extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+extern asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);

static int w_long(unsigned int fd, unsigned int cmd, unsigned long arg)
{
...
@@ -247,6 +247,8 @@ asmlinkage long sys32_sigreturn(struct pt_regs regs)
        sigset_t set;
        unsigned int eax;

+       set_thread_flag(TIF_IRET);
+
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.oldmask)
@@ -277,6 +279,8 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs regs)
        stack_t st;
        unsigned int eax;

+       set_thread_flag(TIF_IRET);
+
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
...
@@ -82,7 +82,7 @@ cstar_sysret:
        GET_THREAD_INFO(%r10)
        cli
        testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
-       jnz int_ret_from_sys_call
+       jnz 1f
        RESTORE_ARGS 1,-ARG_SKIP,1,1
        movl RIP-ARGOFFSET(%rsp),%ecx
        movl EFLAGS-ARGOFFSET(%rsp),%r11d
@@ -90,6 +90,10 @@ cstar_sysret:
        swapgs
        sysretl

+1:
+       btc $TIF_IRET,threadinfo_flags(%r10)
+       jmp int_ret_from_sys_call
+
cstar_tracesys:
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp) /* really needed? */
@@ -121,7 +125,7 @@ cstar_badarg:
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
- * Assumes it is only called from user space and entered with interrups off.
+ * Assumes it is only called from user space and entered with interrupts off.
 */
ENTRY(ia32_syscall)
...
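The zero-extension rule described in the comment above is why some 32-bit
entry points need thin wrappers. A hedged sketch of the pattern --
sys32_example/sys_example are illustrative names, not functions in this tree:

/* The ia32 entry path zero-extends every argument to 64 bit, so a
 * syscall that takes a signed long must re-extend the sign itself. */
asmlinkage long sys32_example(unsigned int fd, unsigned int offset)
{
        /* interpret the low 32 bits as signed, then widen to 64 bit */
        return sys_example(fd, (long)(int)offset);
}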
@@ -27,13 +27,13 @@ asm(" .code32\n"
"sig32_rt_tramp:\n"
"       movl $" __stringify(__NR_ia32_rt_sigreturn) ",%eax\n"
-"      int $0x80\n"
+"      syscall\n"
"sig32_rt_tramp_end:\n"
"sig32_tramp:\n"
"       popl %eax\n"
"       movl $" __stringify(__NR_ia32_sigreturn) ",%eax\n"
-"      int $0x80\n"
+"      syscall\n"
"sig32_tramp_end:\n"
"       .code64\n");
@@ -44,7 +44,7 @@ extern unsigned char sig32_tramp[], sig32_tramp_end[];
char *syscall32_page;

/* RED-PEN: This knows too much about high level VM */
-/* Alternative would be to generate a vma with appropiate backing options
+/* Alternative would be to generate a vma with appropriate backing options
   and let it be handled by generic VM */
int map_syscall32(struct mm_struct *mm, unsigned long address)
{
...
@@ -300,7 +300,7 @@ acpi_boot_init (void)
        /*
         * The default interrupt routing model is PIC (8259). This gets
-        * overriden if IOAPICs are enumerated (below).
+        * overridden if IOAPICs are enumerated (below).
         */
        acpi_irq_model = ACPI_IRQ_MODEL_PIC;
@@ -318,6 +318,9 @@ acpi_boot_init (void)
        } else
                printk(KERN_NOTICE PREFIX "BIOS passes blacklist\n");

+       extern int disable_apic;
+       if (disable_apic)
+               return 0;

#ifdef CONFIG_X86_LOCAL_APIC
@@ -345,7 +348,7 @@ acpi_boot_init (void)
         * Local APIC
         * ----------
         * Note that the LAPIC address is obtained from the MADT (32-bit value)
-        * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+        * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
         */
        result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
...
obj-$(CONFIG_ACPI_BOOT) := boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
/*
 *  acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
 *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 *  Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/tlbflush.h>
extern int acpi_disabled;
#define PREFIX "ACPI: "
/* --------------------------------------------------------------------------
                              Boot-time Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_BOOT

enum acpi_irq_model_id acpi_irq_model;

/* rely on all ACPI tables being in the direct mapping */
char *
__acpi_map_table (
        unsigned long phys_addr,
        unsigned long size)
{
        if (!phys_addr || !size)
                return NULL;

        if (phys_addr < (end_pfn_map << PAGE_SHIFT))
                return __va(phys_addr);

        return NULL;
}
#ifdef CONFIG_X86_LOCAL_APIC

int acpi_lapic;

static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;

static int __init
acpi_parse_madt (
        unsigned long phys_addr,
        unsigned long size)
{
        struct acpi_table_madt *madt = NULL;

        if (!phys_addr || !size)
                return -EINVAL;

        madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
        if (!madt) {
                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
                return -ENODEV;
        }

        if (madt->lapic_address)
                acpi_lapic_addr = (u64) madt->lapic_address;

        printk(KERN_INFO PREFIX "Local APIC address 0x%08x\n",
                madt->lapic_address);

        return 0;
}

static int __init
acpi_parse_lapic (
        acpi_table_entry_header *header)
{
        struct acpi_table_lapic *processor = NULL;

        processor = (struct acpi_table_lapic*) header;
        if (!processor)
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        mp_register_lapic (
                processor->id,                  /* APIC ID */
                processor->flags.enabled);      /* Enabled? */

        return 0;
}

static int __init
acpi_parse_lapic_addr_ovr (
        acpi_table_entry_header *header)
{
        struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;

        lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
        if (!lapic_addr_ovr)
                return -EINVAL;

        acpi_lapic_addr = lapic_addr_ovr->address;

        return 0;
}

static int __init
acpi_parse_lapic_nmi (
        acpi_table_entry_header *header)
{
        struct acpi_table_lapic_nmi *lapic_nmi = NULL;

        lapic_nmi = (struct acpi_table_lapic_nmi*) header;
        if (!lapic_nmi)
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        if (lapic_nmi->lint != 1)
                printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");

        return 0;
}
#endif /*CONFIG_X86_LOCAL_APIC*/
#ifdef CONFIG_X86_IO_APIC

int acpi_ioapic;

static int __init
acpi_parse_ioapic (
        acpi_table_entry_header *header)
{
        struct acpi_table_ioapic *ioapic = NULL;

        ioapic = (struct acpi_table_ioapic*) header;
        if (!ioapic)
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        mp_register_ioapic (
                ioapic->id,
                ioapic->address,
                ioapic->global_irq_base);

        return 0;
}

static int __init
acpi_parse_int_src_ovr (
        acpi_table_entry_header *header)
{
        struct acpi_table_int_src_ovr *intsrc = NULL;

        intsrc = (struct acpi_table_int_src_ovr*) header;
        if (!intsrc)
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        mp_override_legacy_irq (
                intsrc->bus_irq,
                intsrc->flags.polarity,
                intsrc->flags.trigger,
                intsrc->global_irq);

        return 0;
}

static int __init
acpi_parse_nmi_src (
        acpi_table_entry_header *header)
{
        struct acpi_table_nmi_src *nmi_src = NULL;

        nmi_src = (struct acpi_table_nmi_src*) header;
        if (!nmi_src)
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        /* TBD: Support nmi_src entries? */

        return 0;
}

#endif /*CONFIG_X86_IO_APIC*/
#ifdef CONFIG_HPET_TIMER

static int __init
acpi_parse_hpet (
        unsigned long phys_addr,
        unsigned long size)
{
        struct acpi_table_hpet *hpet_tbl;

        hpet_tbl = __va(phys_addr);

        if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
                printk(KERN_WARNING "acpi: HPET timers must be located in memory.\n");
                return -1;
        }

        hpet.address = hpet_tbl->addr.addrl | ((long) hpet_tbl->addr.addrh << 32);

        printk(KERN_INFO "acpi: HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet.address);

        return 0;
}
#endif
static unsigned long __init
acpi_scan_rsdp (
        unsigned long start,
        unsigned long length)
{
        unsigned long offset = 0;
        unsigned long sig_len = sizeof("RSD PTR ") - 1;

        /*
         * Scan all 16-byte boundaries of the physical memory region for the
         * RSDP signature.
         */
        for (offset = 0; offset < length; offset += 16) {
                if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
                        continue;
                return (start + offset);
        }

        return 0;
}

unsigned long __init
acpi_find_rsdp (void)
{
        unsigned long rsdp_phys = 0;

        /*
         * Scan memory looking for the RSDP signature. First search EBDA (low
         * memory) paragraphs and then search upper memory (E0000-FFFFF).
         */
        rsdp_phys = acpi_scan_rsdp (0, 0x400);
        if (!rsdp_phys)
                rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF);

        return rsdp_phys;
}
int __init
acpi_boot_init (void)
{
        int result = 0;

        /*
         * The default interrupt routing model is PIC (8259). This gets
         * overridden if IOAPICs are enumerated (below).
         */
        acpi_irq_model = ACPI_IRQ_MODEL_PIC;

        /*
         * Initialize the ACPI boot-time table parser.
         */
        result = acpi_table_init();
        if (result)
                return result;

        result = acpi_blacklisted();
        if (result) {
                acpi_disabled = 1;
                return result;
        } else
                printk(KERN_NOTICE PREFIX "BIOS passes blacklist\n");

        extern int disable_apic;
        if (disable_apic)
                return 0;

#ifdef CONFIG_X86_LOCAL_APIC

        /*
         * MADT
         * ----
         * Parse the Multiple APIC Description Table (MADT), if it exists.
         * Note that this table provides platform SMP configuration
         * information -- the successor to MPS tables.
         */

        result = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
        if (!result) {
                printk(KERN_WARNING PREFIX "MADT not present\n");
                return 0;
        }
        else if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing MADT\n");
                return result;
        }
        else if (result > 1)
                printk(KERN_WARNING PREFIX "Multiple MADT tables exist\n");

        /*
         * Local APIC
         * ----------
         * Note that the LAPIC address is obtained from the MADT (32-bit value)
         * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
         */

        result = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr);
        if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
                return result;
        }

        mp_register_lapic_address(acpi_lapic_addr);

        result = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic);
        if (!result) {
                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return -ENODEV;
        }
        else if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return result;
        }

        result = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi);
        if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return result;
        }

        acpi_lapic = 1;

#endif /*CONFIG_X86_LOCAL_APIC*/

#ifdef CONFIG_X86_IO_APIC

        /*
         * I/O APIC
         * --------
         */

        result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
        if (!result) {
                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
                return -ENODEV;
        }
        else if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
                return result;
        }

        /* Build a default routing table for legacy (ISA) interrupts. */
        mp_config_acpi_legacy_irqs();

        result = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr);
        if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return result;
        }

        result = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src);
        if (result < 0) {
                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return result;
        }

        acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;

        acpi_ioapic = 1;

#endif /*CONFIG_X86_IO_APIC*/

#ifdef CONFIG_X86_LOCAL_APIC
        if (acpi_lapic && acpi_ioapic)
                smp_found_config = 1;
#endif

#ifdef CONFIG_HPET_TIMER
        result = acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
        if (result < 0)
                printk("ACPI: no HPET table found (%d).\n", result);
#endif

        return 0;
}
#endif /*CONFIG_ACPI_BOOT*/
/*
 *  acpi.c - Architecture-Specific Low-Level ACPI Support
 *
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
 *  Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 *  Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
 *  Copyright (C) 2003 Pavel Machek, SuSE Labs
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/acpi.h>
#include <asm/mpspec.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
#include <asm/tlbflush.h>
/* --------------------------------------------------------------------------
                            Low-Level Sleep Support
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI_SLEEP

/* address in low memory of the wakeup routine. */
unsigned long acpi_wakeup_address = 0;

extern char wakeup_start, wakeup_end;

extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));

static void init_low_mapping(void)
{
        cpu_pda[0].level4_pgt[0] = cpu_pda[0].level4_pgt[pml4_index(PAGE_OFFSET)];
        flush_tlb_all();
}

/**
 * acpi_save_state_mem - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int acpi_save_state_mem (void)
{
        init_low_mapping();

        memcpy((void *) acpi_wakeup_address, &wakeup_start,
               &wakeup_end - &wakeup_start);
        acpi_copy_wakeup_routine(acpi_wakeup_address);

        return 0;
}

/**
 * acpi_save_state_disk - save kernel state to disk
 */
int acpi_save_state_disk (void)
{
        return 1;
}

/*
 * acpi_restore_state
 */
void acpi_restore_state_mem (void)
{
        cpu_pda[0].level4_pgt[0] = 0;
        flush_tlb_all();
}

/**
 * acpi_reserve_bootmem - do _very_ early ACPI initialisation
 *
 * We allocate a page in low memory for the wakeup routine for when we
 * come back from a sleep state. The runtime allocator allows
 * specification of <16M pages, but not <1M pages.
 */
void __init acpi_reserve_bootmem(void)
{
        acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
        if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
                printk(KERN_CRIT "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
        printk(KERN_DEBUG "ACPI: have wakeup address 0x%8.8lx\n", acpi_wakeup_address);
}

#endif /*CONFIG_ACPI_SLEEP*/

void acpi_pci_link_exit(void) {}
        .text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# wakeup_code runs in real mode, and at an unknown address (determined at
# run-time). Therefore it must only use relative jumps/calls.
#
# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be
# enabled.
#
# If the physical address of wakeup_code is 0x12345, the BIOS should call
# us with cs = 0x1234, eip = 0x05.
#

        ALIGN
        .align 16
ENTRY(wakeup_start)
wakeup_code:
        wakeup_code_start = .
        .code16

# Running in a *copy* of this code, somewhere in low 1MB.

        movb $0xa1, %al ; outb %al, $0x80
        cli
        cld

        # setup data segment
        movw %cs, %ax
        movw %ax, %ds                   # Make ds:0 point to wakeup_start
        movw %ax, %ss
        mov $(wakeup_stack - wakeup_code), %sp  # Private stack is needed for ASUS board

        pushl $0                        # Kill any dangerous flags
        popfl

        movl real_magic - wakeup_code, %eax
        cmpl $0x12345678, %eax
        jne bogus_real_magic

        lcall $0xc000,$3

        movw $0xb800, %ax
        movw %ax,%fs
        movw $0x0e00 + 'L', %fs:(0x10)

        movb $0xa2, %al ; outb %al, $0x80

        lidt %ds:idt_48a - wakeup_code
        xorl %eax, %eax
        movw %ds, %ax                   # (Convert %ds:gdt to a linear ptr)
        shll $4, %eax
        addl $(gdta - wakeup_code), %eax
        movl %eax, gdt_48a +2 - wakeup_code
        lgdt %ds:gdt_48a - wakeup_code  # load gdt with whatever is appropriate

        movl $1, %eax                   # protected mode (PE) bit
        lmsw %ax                        # This is it!
        jmp 1f
1:
        .byte 0x66, 0xea                # prefix + jmpi-opcode
        .long wakeup_32 - __START_KERNEL_map
        .word __KERNEL_CS
        .code32
wakeup_32:
# Running in this code, but at low address; paging is not yet turned on.
        movb $0xa5, %al ; outb %al, $0x80

        /* Check if extended functions are implemented */
        movl $0x80000000, %eax
        cpuid
        cmpl $0x80000000, %eax
        jbe bogus_cpu
        wbinvd
        mov $0x80000001, %eax
        cpuid
        btl $29, %edx
        jnc bogus_cpu
        movl %edx,%edi

        movw $__KERNEL_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs

        movw $__KERNEL_DS, %ax
        movw %ax, %ss

        mov $(wakeup_stack - __START_KERNEL_map), %esp
        call 1f
1:      popl %eax
        movl $0xb8040, %ebx
        call early_print

        movl saved_magic - __START_KERNEL_map, %eax
        cmpl $0x9abcdef0, %eax
        jne bogus_32_magic
        /*
         * Prepare for entering 64 bit mode
         */

        /* Enable PAE mode and PGE */
        xorl %eax, %eax
        btsl $5, %eax
        btsl $7, %eax
        movl %eax, %cr4

        /* Setup early boot stage 4 level pagetables */
#if 1
        movl $(wakeup_level4_pgt - __START_KERNEL_map), %eax
#else
        movl saved_cr3 - __START_KERNEL_map, %eax
#endif
        movl %eax, %cr3

        /* Setup EFER (Extended Feature Enable Register) */
        movl $MSR_EFER, %ecx
        rdmsr
        /* Fool rdmsr and reset %eax to avoid dependences */
        xorl %eax, %eax
        /* Enable Long Mode */
        btsl $_EFER_LME, %eax
        /* Enable System Call */
        btsl $_EFER_SCE, %eax
        /* No Execute supported? */
        btl $20,%edi
        jnc 1f
        btsl $_EFER_NX, %eax
1:
        /* Make changes effective */
        wrmsr
        wbinvd

        xorl %eax, %eax
        btsl $31, %eax          /* Enable paging and in turn activate Long Mode */
        btsl $0, %eax           /* Enable protected mode */
        btsl $1, %eax           /* Enable MP */
        btsl $4, %eax           /* Enable ET */
        btsl $5, %eax           /* Enable NE */
        btsl $16, %eax          /* Enable WP */
        btsl $18, %eax          /* Enable AM */
        /* Make changes effective */
        movl %eax, %cr0
        /* At this point:
                CR4.PAE must be 1
                CS.L must be 0
                CR3 must point to the PML4
                The next instruction must be a branch
                This must be on an identity-mapped page
        */
        jmp reach_compatibility_mode
reach_compatibility_mode:
        movw $0x0e00 + 'i', %ds:(0xb8012)
        movb $0xa8, %al ; outb %al, $0x80

        /*
         * At this point we're in long mode but in 32bit compatibility mode
         * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn EFER.LMA = 1).
         * Now we want to jump into 64bit mode; to do that we load the new
         * gdt/idt that has __KERNEL_CS with CS.L = 1.
         */

        movw $0x0e00 + 'n', %ds:(0xb8014)
        movb $0xa9, %al ; outb %al, $0x80

        /* Load new GDT with the 64bit segment using 32bit descriptor */
        movl $(pGDT32 - __START_KERNEL_map), %eax
        lgdt (%eax)

        movl $(wakeup_jumpvector - __START_KERNEL_map), %eax
        /* Finally jump into 64bit mode */
        ljmp *(%eax)

wakeup_jumpvector:
        .long wakeup_long64 - __START_KERNEL_map
        .word __KERNEL_CS
        .code64

        /* Hooray, we are in long 64-bit mode (but still running in low memory) */
wakeup_long64:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses where we're currently running on. We have to do that here
         * because in 32bit we couldn't load a 64bit linear address.
         */
        lgdt cpu_gdt_descr - __START_KERNEL_map

        movw $0x0e00 + 'u', %ds:(0xb8016)

        nop
        nop
        movw $__KERNEL_DS, %ax
        movw %ax, %ss
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movq saved_esp, %rsp

        movw $0x0e00 + 'x', %ds:(0xb8018)
        movq saved_ebx, %rbx
        movq saved_edi, %rdi
        movq saved_esi, %rsi
        movq saved_ebp, %rbp

        movw $0x0e00 + '!', %ds:(0xb801a)
        movq saved_eip, %rax
        jmp *%rax
        .code32

early_print:
        movl $16, %edx
1:
        movl %eax, %ecx
        andl $0xf, %ecx
        shrl $4, %eax
        addw $0x0e00 + '0', %cx
        movw %cx, %ds:(%edx, %ebx)
        decl %edx
        decl %edx
        jnz 1b
        ret
        .align 64
gdta:
        .word 0, 0, 0, 0                # dummy
        .word 0, 0, 0, 0                # unused

        .word 0xFFFF                    # 4Gb - (0x100000*0x1000 = 4Gb)
        .word 0                         # base address = 0
        .word 0x9B00                    # code read/exec. ??? Why do I need
                                        # 0x9B00 (as opposed to 0x9A00) for
                                        # this to work?
        .word 0x00CF                    # granularity = 4096, 386
                                        # (+5th nibble of limit)

        .word 0xFFFF                    # 4Gb - (0x100000*0x1000 = 4Gb)
        .word 0                         # base address = 0
        .word 0x9200                    # data read/write
        .word 0x00CF                    # granularity = 4096, 386
                                        # (+5th nibble of limit)

        # this is the 64bit descriptor for code
        .word 0xFFFF
        .word 0
        .word 0x9A00                    # code read/exec
        .word 0x00AF                    # as above, but it is long mode and with D=0

idt_48a:
        .word 0                         # idt limit = 0
        .word 0, 0                      # idt base = 0L

gdt_48a:
        .word 0x8000                    # gdt limit=2048, 256 GDT entries
        .word 0, 0                      # gdt base (filled in later)

real_save_gdt:  .word 0
                .quad 0
real_magic:     .quad 0
video_mode:     .quad 0

bogus_real_magic:
        movb $0xba,%al ; outb %al,$0x80
        jmp bogus_real_magic

bogus_32_magic:
        movb $0xb3,%al ; outb %al,$0x80
        jmp bogus_32_magic

bogus_31_magic:
        movb $0xb1,%al ; outb %al,$0x80
        jmp bogus_31_magic

bogus_cpu:
        movb $0xbc,%al ; outb %al,$0x80
        jmp bogus_cpu
/* This code uses an extended set of video mode numbers. These include:
* Aliases for standard modes
* NORMAL_VGA (-1)
* EXTENDED_VGA (-2)
* ASK_VGA (-3)
* Video modes numbered by menu position -- NOT RECOMMENDED because of lack
* of compatibility when extending the table. These are between 0x00 and 0xff.
*/
#define VIDEO_FIRST_MENU 0x0000
/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100
/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200
/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900
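# Worked example of the numbering scheme above: VESA mode 0x101 (the common
# 640x480, 256-color mode) is represented as VIDEO_FIRST_VESA + 0x101 =
# 0x0301; check_vesaa below subtracts the 0x02 prefix again before issuing
# the int $0x10 VESA mode-set call.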
# Setting of user mode (AX=mode ID) => CF=success
mode_seta:
        movw %ax, %bx
#if 0
        cmpb $0xff, %ah
        jz setalias

        testb $VIDEO_RECALC>>8, %ah
        jnz _setrec

        cmpb $VIDEO_FIRST_RESOLUTION>>8, %ah
        jnc setres

        cmpb $VIDEO_FIRST_SPECIAL>>8, %ah
        jz setspc

        cmpb $VIDEO_FIRST_V7>>8, %ah
        jz setv7
#endif
        cmpb $VIDEO_FIRST_VESA>>8, %ah
        jnc check_vesaa
#if 0
        orb %ah, %ah
        jz setmenu
#endif
        decb %ah
#       jz setbios                      Add bios modes later

setbada:
        clc
        ret

check_vesaa:
        subb $VIDEO_FIRST_VESA>>8, %bh
        orw $0x4000, %bx                # Use linear frame buffer
        movw $0x4f02, %ax               # VESA BIOS mode set call
        int $0x10
        cmpw $0x004f, %ax               # AL=4f if implemented
        jnz _setbada                    # AH=0 if OK
        stc
        ret

_setbada:
        jmp setbada
        .code64
bogus_magic:
        movw $0x0e00 + 'B', %ds:(0xb8018)
        jmp bogus_magic

bogus_magic2:
        movw $0x0e00 + '2', %ds:(0xb8018)
        jmp bogus_magic2

wakeup_stack_begin:                     # Stack grows down
        .org 0xff0
wakeup_stack:                           # Just below end of page
ENTRY(wakeup_end)
##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi: place to copy wakeup routine to
#
# Returned address is location of code in low memory (past data and stack)
#
ENTRY(acpi_copy_wakeup_routine)
        pushq %rax
        pushq %rcx
        pushq %rdx

        sgdt saved_gdt
        sidt saved_idt
        sldt saved_ldt
        str saved_tss

        movq %cr3, %rdx
        movq %rdx, saved_cr3
        movq %cr4, %rdx
        movq %rdx, saved_cr4
        movq %cr0, %rdx
        movq %rdx, saved_cr0
        sgdt real_save_gdt - wakeup_start (,%rdi)
        movl $MSR_EFER, %ecx
        rdmsr
        movl %eax, saved_efer
        movl %edx, saved_efer2

#       movq saved_videomode, %rdx      # FIXME: videomode
        movq %rdx, video_mode - wakeup_start (,%rdi)
        movq $0x12345678, real_magic - wakeup_start (,%rdi)
        movq $0x123456789abcdef0, %rdx
        movq %rdx, saved_magic

        movl saved_magic - __START_KERNEL_map, %eax
        cmpl $0x9abcdef0, %eax
        jne bogus_32_magic

        # make sure %cr4 is set correctly (features, etc)
        movl saved_cr4 - __START_KERNEL_map, %eax
        movq %rax, %cr4

        movl saved_cr0 - __START_KERNEL_map, %eax
        movq %rax, %cr0
        jmp 1f                          # Flush pipelines
1:
        # restore the regs we used
        popq %rdx
        popq %rcx
        popq %rax
ENTRY(do_suspend_lowlevel_s4bios)
        ret
        .align 2
        .p2align 4,,15
        .globl do_suspend_lowlevel
        .type do_suspend_lowlevel,@function
do_suspend_lowlevel:
.LFB5:
        subq $8, %rsp
.LCFI2:
        testl %edi, %edi
        jne .L99
        xorl %eax, %eax
        call save_processor_state

        movq %rsp, saved_context_esp(%rip)
        movq %rax, saved_context_eax(%rip)
        movq %rbx, saved_context_ebx(%rip)
        movq %rcx, saved_context_ecx(%rip)
        movq %rdx, saved_context_edx(%rip)
        movq %rbp, saved_context_ebp(%rip)
        movq %rsi, saved_context_esi(%rip)
        movq %rdi, saved_context_edi(%rip)
        movq %r8, saved_context_r08(%rip)
        movq %r9, saved_context_r09(%rip)
        movq %r10, saved_context_r10(%rip)
        movq %r11, saved_context_r11(%rip)
        movq %r12, saved_context_r12(%rip)
        movq %r13, saved_context_r13(%rip)
        movq %r14, saved_context_r14(%rip)
        movq %r15, saved_context_r15(%rip)
        pushfq ; popq saved_context_eflags(%rip)

        movq $.L97, saved_eip(%rip)

        movq %rsp,saved_esp
        movq %rbp,saved_ebp
        movq %rbx,saved_ebx
        movq %rdi,saved_edi
        movq %rsi,saved_esi

        addq $8, %rsp
        movl $3, %edi
        xorl %eax, %eax
        jmp acpi_enter_sleep_state
.L97:
        .p2align 4,,7
.L99:
        .align 4
        movl $24, %eax
        movw %ax, %ds
        movq saved_context+58(%rip), %rax
        movq %rax, %cr4
        movq saved_context+50(%rip), %rax
        movq %rax, %cr3
        movq saved_context+42(%rip), %rax
        movq %rax, %cr2
        movq saved_context+34(%rip), %rax
        movq %rax, %cr0
        pushq saved_context_eflags(%rip) ; popfq
        movq saved_context_esp(%rip), %rsp
        movq saved_context_ebp(%rip), %rbp
        movq saved_context_eax(%rip), %rax
        movq saved_context_ebx(%rip), %rbx
        movq saved_context_ecx(%rip), %rcx
        movq saved_context_edx(%rip), %rdx
        movq saved_context_esi(%rip), %rsi
        movq saved_context_edi(%rip), %rdi
        movq saved_context_r08(%rip), %r8
        movq saved_context_r09(%rip), %r9
        movq saved_context_r10(%rip), %r10
        movq saved_context_r11(%rip), %r11
        movq saved_context_r12(%rip), %r12
        movq saved_context_r13(%rip), %r13
        movq saved_context_r14(%rip), %r14
        movq saved_context_r15(%rip), %r15
        xorl %eax, %eax
        addq $8, %rsp
        jmp restore_processor_state
.LFE5:
.Lfe5:
        .size do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel

        .data
ALIGN
ENTRY(saved_ebp)        .quad 0
ENTRY(saved_esi)        .quad 0
ENTRY(saved_edi)        .quad 0
ENTRY(saved_ebx)        .quad 0

ENTRY(saved_eip)        .quad 0
ENTRY(saved_esp)        .quad 0

ENTRY(saved_magic)      .quad 0

ALIGN
# saved registers
saved_gdt:      .quad 0,0
saved_idt:      .quad 0,0
saved_ldt:      .quad 0
saved_tss:      .quad 0

saved_cr0:      .quad 0
saved_cr3:      .quad 0
saved_cr4:      .quad 0
saved_efer:     .quad 0
saved_efer2:    .quad 0
@@ -376,7 +376,7 @@ void __init setup_local_APIC (void)
         * Set up LVT0, LVT1:
         *
         * set up through-local-APIC on the BP's LINT0. This is not
-        * strictly necessery in pure symmetric-IO mode, but sometimes
+        * strictly necessary in pure symmetric-IO mode, but sometimes
         * we delegate interrupts to the 8259A.
         */
        /*
@@ -935,7 +935,7 @@ void smp_local_timer_interrupt(struct pt_regs *regs)
        /*
         * We take the 'long' return path, and there every subsystem
-        * grabs the apropriate locks (kernel lock/ irq lock).
+        * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
...
@@ -64,6 +64,7 @@ static inline void do_cpuid(int cpu, u32 reg, u32 *data)
{
        struct cpuid_command cmd;

+       preempt_disable();
        if ( cpu == smp_processor_id() ) {
                cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
        } else {
@@ -73,6 +74,7 @@ static inline void do_cpuid(int cpu, u32 reg, u32 *data)
                smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
        }
+       preempt_enable();
}

#else /* ! CONFIG_SMP */
...
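The point of the new preempt_disable()/preempt_enable() pair: the result of
smp_processor_id() is only stable while preemption is off. The shape of the
fix, as a sketch (cmd setup elided):

        preempt_disable();              /* pin the task to the current CPU */
        if (cpu == smp_processor_id())
                cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
        else
                smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
        preempt_enable();

Without the pair, the task could migrate between the smp_processor_id()
check and the cpuid() itself and query the wrong CPU. The same pattern is
applied to the MSR read/write paths in msr.c below.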
@@ -54,7 +54,7 @@ static struct console early_vga_console = {
        .index = -1,
};

-/* Serial functions losely based on a similar package from Klaus P. Gerlicher */
+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */

int early_serial_base = 0x3f8;          /* ttyS0 */
...
@@ -25,7 +25,7 @@
        .text
        .code32

-/* %bx: 1 if comming from smp trampoline on secondary cpu */
+/* %bx: 1 if coming from smp trampoline on secondary cpu */
startup_32:

/*
@@ -194,7 +194,8 @@ ENTRY(no_long_mode)
        jmp 1b

        .org 0xf00
-ENTRY(pGDT32):
+       .globl pGDT32
+pGDT32:
        .word gdt32_end-gdt_table32
        .long gdt_table32-__START_KERNEL_map
...
@@ -49,7 +49,7 @@ void __init fpu_init(void)
/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default.
- * remeber the current task has used the FPU.
+ * remember the current task has used the FPU.
 */
void init_fpu(struct task_struct *child)
{
...
@@ -50,7 +50,7 @@
 * Linux has a controller-independent x86 interrupt architecture.
 * every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the apropriate
+ * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
@@ -688,7 +688,7 @@ unsigned int probe_irq_mask(unsigned long val)
 * The interrupt probe logic state is returned to its previous
 * value.
 *
- * BUGS: When used in a module (which arguably shouldnt happen)
+ * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
...
@@ -154,7 +154,7 @@ static int read_ldt(void * ptr, unsigned long bytecount)
static int read_default_ldt(void * ptr, unsigned long bytecount)
{
-       /* Arbitary number */
+       /* Arbitrary number */
        /* x86-64 default LDT is all zeros */
        if (bytecount > 128)
                bytecount = 128;
...
/*
- * Intel Multiprocessor Specificiation 1.1 and 1.4
+ * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
@@ -849,7 +849,7 @@ void __init mp_config_acpi_legacy_irqs (void)
        /*
         * Use the default configuration for the IRQs 0-15. These may be
-        * overriden by (MADT) interrupt source override entries.
+        * overridden by (MADT) interrupt source override entries.
         */
        for (i = 0; i < 16; i++) {
...
@@ -120,8 +120,11 @@ static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
{
        struct msr_command cmd;

+       preempt_disable();
        if ( cpu == smp_processor_id() ) {
-               return wrmsr_eio(reg, eax, edx);
+               int ret = wrmsr_eio(reg, eax, edx);
+               preempt_enable();
+               return ret;
        } else {
                cmd.cpu = cpu;
                cmd.reg = reg;
@@ -129,6 +132,7 @@ static inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
                cmd.data[1] = edx;

                smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
+               preempt_enable();

                return cmd.err;
        }
}
...
@@ -12,7 +12,7 @@
dma_addr_t bad_dma_address = -1UL;

/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scather-gather version of the
+ * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
...
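For context, the comment describes the usual scatter-gather DMA flow of this
era (see Documentation/DMA-mapping.txt). A hedged usage sketch; pdev, sglist,
nents and program_device() are illustrative placeholders, not code from this
tree:

        int i, n;

        n = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
        for (i = 0; i < n; i++) {
                /* hand each mapped segment to the device */
                program_device(sg_dma_address(&sglist[i]),
                               sg_dma_length(&sglist[i]));
        }
        /* ... run the DMA, then unmap ... */
        pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);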
@@ -437,8 +437,6 @@ static __init int init_k8_gatt(agp_kern_info *info)
        if (!gatt)
                panic("Cannot allocate GATT table");
        memset(gatt, 0, gatt_size);
-       change_page_attr(virt_to_page(gatt), gatt_size/PAGE_SIZE, PAGE_KERNEL_NOCACHE);
-       global_flush_tlb();
        agp_gatt_table = gatt;

        for_all_nb(dev) {
@@ -538,7 +536,8 @@ void __init pci_iommu_init(void)
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
        bad_dma_address = iommu_bus_base;

-       asm volatile("wbinvd" ::: "memory");
+       change_page_attr(virt_to_page(__va(iommu_start)), iommu_pages, PAGE_KERNEL);
+       global_flush_tlb();
}

/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]]
...
@@ -451,7 +451,7 @@ static int __init init_amd(struct cpuinfo_x86 *c)
        if (!r) {
                switch (c->x86) {
                case 15:
-                       /* Should distingush Models here, but this is only
+                       /* Should distinguish Models here, but this is only
                           a fallback anyways. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
...
@@ -160,7 +160,7 @@ static inline void leave_mm (unsigned long cpu)
 * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignore flush ipis
- *     for the wrong mm, and in the worst case we perform a superflous
+ *     for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
@@ -250,7 +250,7 @@ static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
                BUG();

        /*
-        * i'm not happy about this global shared spinlock in the
+        * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
...
@@ -121,7 +121,11 @@ void fix_processor_context(void)
        int cpu = smp_processor_id();
        struct tss_struct * t = init_tss + cpu;

-       printk("Should fix processor context!\n");
+       set_tss_desc(cpu,t);    /* This just modifies memory; should not be
+                                  necessary. But... This is necessary, because
+                                  386 hardware has the concept of a busy TSS
+                                  or some similar stupidity. */
+       ((struct n_desc_struct *) &cpu_gdt_table[cpu][GDT_ENTRY_TSS])->b &= 0xfffffdff;
+       syscall_init();                         /* This sets MSR_*STAR and related */
+       load_TR_desc();                         /* This does ltr */
        load_LDT(&current->mm->context);        /* This does lldt */

        /*
...
@@ -323,14 +323,13 @@ int die_owner = -1;

void die(const char * str, struct pt_regs * regs, long err)
{
-       int cpu;
+       struct die_args args = { regs, str, err };
        static int die_counter;
+       int cpu;

        console_verbose();
+       notifier_call_chain(&die_chain, DIE_DIE, &args);
        bust_spinlocks(1);
        handle_BUG(regs);
-       printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+       printk(KERN_EMERG "%s: %04lx [%u]\n", str, err & 0xffff, ++die_counter);
-       notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
        cpu = safe_smp_processor_id();
        /* racy, but better than risking deadlock. */
        local_irq_disable();
@@ -662,7 +661,7 @@ void math_error(void *rip)
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit. We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
-        * then we have a bad program that isn't syncronizing its FPU usage
+        * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
...
@@ -7,7 +7,7 @@
 *
 * Copyright 2003 by Andi Kleen, SuSE Labs.
 *
- * Long mode entry losely based on example code in chapter 14 of the x86-64 system
+ * Long mode entry loosely based on example code in chapter 14 of the x86-64 system
 * programmer's manual.
 *
 * Notebook:
...
@@ -58,7 +58,7 @@ bad_to_user:
 * rdx count
 *
 * Output:
- * eax uncopied bytes or 0 if successfull.
+ * eax uncopied bytes or 0 if successful.
 */
        .globl copy_user_generic
copy_user_generic:
...
@@ -83,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
        limit >>= 16;
        limit <<= 24;
+       limit |= (1<<24)-1;
        if (limit > end_pfn_map << PAGE_SHIFT)
                limit = end_pfn_map << PAGE_SHIFT;
...
@@ -17,6 +17,7 @@
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/cache.h>

#include "pci.h"
@@ -94,6 +95,8 @@ static void __devinit pcibios_fixup_ghosts(struct pci_bus *b)
        }
}

+struct pbus_set_ranges_data;
+
void __devinit
pcibios_fixup_pbus_ranges (struct pci_bus *bus, struct pbus_set_ranges_data *ranges)
{
@@ -129,6 +132,8 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
        return pci_scan_bus(busnum, pci_root_ops, NULL);
}

+extern u8 pci_cache_line_size;
+
static int __init pcibios_init(void)
{
        if (!pci_root_ops) {
@@ -136,6 +141,8 @@ static int __init pcibios_init(void)
                return 0;
        }

+       pci_cache_line_size = boot_cpu_data.x86_clflush_size >> 2;
+
        pcibios_resource_survey();

#ifdef CONFIG_GART_IOMMU
...
@@ -101,6 +101,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TIF_SIGPENDING          2       /* signal pending */
#define TIF_NEED_RESCHED        3       /* rescheduling necessary */
#define TIF_SINGLESTEP          4       /* reenable singlestep on user return*/
+#define TIF_IRET                5       /* force IRET */
#define TIF_POLLING_NRFLAG      16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_IA32                17      /* 32bit process */
@@ -109,6 +110,7 @@ static inline struct thread_info *stack_thread_info(void)
#define _TIF_SIGPENDING         (1<<TIF_SIGPENDING)
#define _TIF_SINGLESTEP         (1<<TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED       (1<<TIF_NEED_RESCHED)
+#define _TIF_IRET               (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG     (1<<TIF_POLLING_NRFLAG)
#define _TIF_IA32               (1<<TIF_IA32)
...