Commit f536a213 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] Fix x86-64 loose ends

 - Make MTRR driver compatible with 2.4/x86-64 again.  This fixes an
   endless loop in the XFree86 4.3pre server.
 - Fix the boot code that rejected CPUs earlier: early CPU detection has been
   rewritten, and SSE is now forced on (enabled via the HWCFG MSR on AMD CPUs
   when CPUID does not report it)
 - Always force inlining in vsyscalls
parent c7225c14
@@ -329,84 +329,46 @@ void close_output_buffer_if_we_run_high(struct moveparams *mv)
 void check_cpu(void)
 {
-	int res = 0;
-	int tmp, flags;
-	asm volatile( " \n\
-	movl $3,%%edx		# at least 386 \n\
-	pushfl			# push EFLAGS \n\
-	popl %%eax		# get EFLAGS \n\
-	movl %%eax,%%ecx	# save original EFLAGS \n\
-	xorl $0x40000,%%eax	# flip AC bit in EFLAGS \n\
-	pushl %%eax		# copy to EFLAGS \n\
-	popfl			# set EFLAGS \n\
-	pushfl			# get new EFLAGS \n\
-	popl %%eax		# put it in eax \n\
-	xorl %%ecx,%%eax	# change in flags \n\
-	andl $0x40000,%%eax	# check if AC bit changed \n\
-	je 1f \n\
-\n\
-	movl $4,%%edx		# at least 486 \n\
-	movl %%ecx,%%eax \n\
-	xorl $0x200000,%%eax	# check ID flag \n\
-	pushl %%eax \n\
-	popfl			# if we are on a straight 486DX, SX, or \n\
-	pushfl			# 487SX we can't change it \n\
-	popl %%eax \n\
-	xorl %%ecx,%%eax \n\
-	pushl %%ecx		# restore original EFLAGS \n\
-	popfl \n\
-	andl $0x200000,%%eax \n\
-	je 1f \n\
-\n\
-	/* get vendor info */ \n\
-#	xorl %%eax,%%eax	# call CPUID with 0 -> return vendor ID \n\
-#	cpuid \n\
-#	movl $5, %%edx \n\
-#	cmpl $0x41757468,%%ebx	# check thats amd \n\
-#	jne 1f \n\
-\n\
-	mov $0x80000000,%%eax	# Is extended cpuid supported?\n\
-	cpuid\n\
-	test $0x80000000,%%eax\n\
-	movl $5, %%edx \n\
-	jz 1f\n\
-\n\
-	movl $0x80000001,%%eax \n\
-	cpuid \n\
-	andl $0x20000000,%%edx \n\
-	movl $6, %%edx \n\
-	jz 1f \n\
-\n\
-	movl $7, %%edx \n\
-1:" : "=d" (res) : : "eax", "ebx", "ecx" );
-	switch (res) {
-	case 3: puts( "386" );
-		break;
-	case 4: puts( "486" );
-		break;
-	case 5: puts( "no extended cpuid" );
-		break;
-	case 6: puts( "non-64bit 586+" );
-		break;
-	case 7: puts( "64bit" );
-		break;
-	default:puts( "internal error" );
-		break;
-	}
-	if (res != 7)
-		error( "Sorry, your CPU is not capable of running 64-bit kernel." );
+	unsigned before, after, flags;
+	unsigned a,b,c,d;
+	int isamd;
+
+	/* check if the CPU supports CPUID. This is done by testing if the CPU
+	   supports changing the ID bit (21) in EFLAGS. */
+	asm("pushfl ; "
+	    "popl %0 ; "		/* get EFLAGS */
+	    "movl %0,%1 ; "
+	    "xorl $(1<<21),%0 ; "	/* toggle bit 21 */
+	    "pushl %0 ; "
+	    "popfl ; "
+	    "pushfl ; "			/* get EFLAGS again */
+	    "popl %0 " : "=r" (after), "=r" (before));
+	if (before == after)
+		error("Your CPU doesn't support CPUID.");
+
+	/* check if it supports AMD extended cpuid reporting */
+	asm("cpuid" : "=a" (a), "=b" (b), "=c" (c), "=d" (d) : "0" (0x80000000));
+
+	if (a < 0x80000001)
+		error("Your CPU doesn't support AMD extended CPUIDs.");
+
+	/* AuthenticAMD */
+	isamd = (b == 0x68747541) && (d == 0x69746e65) && (c == 0x444d4163);
+
 	/* check required feature flags */
 	/* see http://www.x86-64.org/lists/discuss/msg02971.html */
 #define REQUIRED_MASK1 ((1<<0)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<8)|(1<<11)| \
 			(1<<13)|(1<<15)|(1<<24))
-	asm("cpuid" : "=d" (flags), "=a" (tmp) : "1" (0x80000001) : "ebx", "ecx");
+	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (0x80000001) : "ebx", "ecx");
 	flags &= REQUIRED_MASK1;
 	flags ^= REQUIRED_MASK1;
+	if (flags & (1<<9)) {
+		puts("WARNING: non APIC mode for long mode kernel is untested.");
+		puts("In case of trouble use 32bit kernel or enable APIC.");
+	}
 	if (flags & (1<<0))
-		error("CPU misses x87");
+		error("CPU misses x87 FPU");
 	if (flags & (1<<3))
 		error("CPU doesn't support page size extension (PSE)");
 	if (flags & (1<<4))
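A note on the magic numbers in the new isamd test: CPUID returns the vendor string as three little-endian dwords in EBX, EDX and ECX, so 0x68747541, 0x69746e65 and 0x444d4163 are simply "AuthenticAMD" packed into integers. A small stand-alone C sketch (illustrative only, not part of the patch) that decodes them:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Same constants as the isamd check: EBX, EDX, ECX from CPUID. */
		unsigned int ebx = 0x68747541, edx = 0x69746e65, ecx = 0x444d4163;
		char vendor[13];

		/* On little-endian x86 the dwords concatenate to the ASCII string. */
		memcpy(vendor + 0, &ebx, 4);	/* "Auth" */
		memcpy(vendor + 4, &edx, 4);	/* "enti" */
		memcpy(vendor + 8, &ecx, 4);	/* "cAMD" */
		vendor[12] = '\0';

		printf("%s\n", vendor);		/* prints: AuthenticAMD */
		return 0;
	}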
@@ -425,11 +387,23 @@ void check_cpu(void)
 		error("CPU doesn't support CMOV");
 	if (flags & (1<<24))
 		error("CPU doesn't support FXSAVE/FXRSTOR");
-#define REQUIRED_MASK2 ((1<<25)|(1<<26))
-	asm("cpuid" : "=d" (flags), "=a" (tmp) : "1" (1) : "ebx", "ecx");
-	flags &= REQUIRED_MASK2;
-	flags ^= REQUIRED_MASK2;
+	if (flags & (1<<29))
+		error("CPU doesn't support long mode");
+
+#define SSE_MASK ((1<<25)|(1<<26))
+	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
+	if ((flags & SSE_MASK) != SSE_MASK && isamd) {
+		/* Only try this on AMD CPUs. */
+		/* Enable SSE in HWCFG MSR */
+		asm volatile("rdmsr" : "=d" (d), "=a" (flags) : "c" (0xc0010015));
+		flags &= ~(1<<15);
+		asm volatile("wrmsr" :: "d" (d), "a" (flags), "c" (0xc0010015));
+	}
+
+	/* Try again */
+	asm("cpuid" : "=d" (flags), "=a" (a) : "1" (1) : "ebx", "ecx");
+	flags &= SSE_MASK;
+	flags ^= SSE_MASK;
 	if (flags & (1<<25))
 		error("CPU doesn't support SSE1");
 	if (flags & (1<<26))
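The hunk above no longer fails outright when CPUID leaf 1 does not advertise SSE: on AMD CPUs it first clears bit 15 of the HWCFG MSR (0xc0010015) to enable SSE, then re-runs the check. The rdmsr/wrmsr part needs ring 0, but the CPUID re-check itself is ordinary user-visible CPUID; a minimal user-space sketch of just that check (illustrative only, not part of the patch):

	#include <stdio.h>

	/* CPUID leaf 1, EDX: bit 25 = SSE, bit 26 = SSE2 (the boot code's SSE_MASK). */
	#define SSE_MASK ((1<<25)|(1<<26))

	int main(void)
	{
		unsigned int eax, edx;

		/* Same constraint style as the boot code: leaf 1, keep EAX and EDX. */
		asm("cpuid" : "=a" (eax), "=d" (edx) : "0" (1) : "ebx", "ecx");

		printf("SSE:  %s\n", (edx & (1<<25)) ? "yes" : "no");
		printf("SSE2: %s\n", (edx & (1<<26)) ? "yes" : "no");
		printf("both: %s\n", ((edx & SSE_MASK) == SSE_MASK) ? "yes" : "no");
		return 0;
	}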
@@ -51,13 +51,14 @@
 #include <asm/errno.h>
 
 #define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define force_inline __attribute__((always_inline)) inline
 
 int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
 seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
 
 #include <asm/unistd.h>
 
-static inline void timeval_normalize(struct timeval * tv)
+static force_inline void timeval_normalize(struct timeval * tv)
 {
 	time_t __sec;
@@ -69,7 +70,7 @@ static inline void timeval_normalize(struct timeval * tv)
 	}
 }
 
-static inline void do_vgettimeofday(struct timeval * tv)
+static force_inline void do_vgettimeofday(struct timeval * tv)
 {
 	long sequence, t;
 	unsigned long sec, usec;
@@ -91,12 +92,12 @@ static inline void do_vgettimeofday(struct timeval * tv)
 }
 
 /* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-static inline void do_get_tz(struct timezone * tz)
+static force_inline void do_get_tz(struct timezone * tz)
 {
 	*tz = __sys_tz;
 }
 
-static inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 {
 	int ret;
 	asm volatile("syscall"
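The vsyscall hunks swap plain inline for a force_inline macro because inline is only a hint: if GCC ever emitted an out-of-line copy of one of these helpers, that copy would live outside the .vsyscall_* sections, where user space executing the vsyscall page could not reach it. __attribute__((always_inline)) makes the expansion mandatory (and turns a failure to inline into a compile error). A stand-alone sketch of the idiom, with illustrative function names not taken from the kernel:

	/* force_inline as introduced above: always_inline turns the "inline"
	   hint into a requirement; GCC inlines the function even at -O0 and
	   reports an error if it ever cannot. */
	#define force_inline __attribute__((always_inline)) inline

	static force_inline long scale(long x)
	{
		return x * 1000;
	}

	long caller(long v)
	{
		/* scale() is expanded in place here rather than called. */
		return scale(v) + 1;
	}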
@@ -35,11 +35,15 @@ struct mtrr_sentry
     unsigned int  type;    /* Type of region   */
 };
 
+/* Warning: this structure has a different order from i386
+   on x86-64. The 32bit emulation code takes care of that.
+   But you need to use this for 64bit, otherwise your X server
+   will break. */
 struct mtrr_gentry
 {
     unsigned long base;    /* Base address     */
+    unsigned int  regnum;  /* Register number  */
     unsigned int  size;    /* Size of region   */
-    unsigned int  regnum;  /* Register number  */
     unsigned int  type;    /* Type of region   */
 };
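The reordered struct mtrr_gentry above is what user space sees through the /proc/mtrr ioctl interface the X server uses, which is why a mismatched layout breaks it (the endless loop mentioned in the commit message). A minimal user-space sketch that walks the registers with this layout, assuming the standard MTRRIOC_GET_ENTRY ioctl from <asm/mtrr.h> (the loop and error handling are illustrative, not from the patch):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <asm/mtrr.h>

	int main(void)
	{
		struct mtrr_gentry g;
		int fd = open("/proc/mtrr", O_RDONLY);

		if (fd < 0) {
			perror("/proc/mtrr");
			return 1;
		}

		/* Ask the kernel for each register until it reports no more. */
		for (g.regnum = 0; ioctl(fd, MTRRIOC_GET_ENTRY, &g) == 0; g.regnum++) {
			if (g.size == 0)
				continue;	/* unused register */
			printf("reg %u: base 0x%lx, size 0x%x, type %u\n",
			       g.regnum, g.base, g.size, g.type);
		}

		close(fd);
		return 0;
	}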