/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *
 *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages on setup so define __START_KERNEL to
 * 0x100000 for this stage
 */
	.text
	.code32
/* %bx: 1 if coming from smp trampoline on secondary cpu */
startup_32:

	/*
	 * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
	 * paging disabled and the point of this file is to switch to 64bit
	 * long mode with a kernel mapping for kernel land to jump into the
	 * kernel virtual addresses.
	 * There is no stack until we set one up.
	 */

	movl	%ebx,%ebp	/* Save trampoline flag */

	/* If the CPU doesn't support CPUID this will double fault.
	 * Unfortunately it is hard to check for CPUID without a stack.
	 */

	/* Check if extended functions are implemented */
	movl	$0x80000000, %eax
	cpuid
	cmpl	$0x80000000, %eax
	jbe	no_long_mode
	/* Check if long mode is implemented */
	mov	$0x80000001, %eax
	cpuid
	btl	$29, %edx	/* EDX bit 29 = long mode capability */
	jnc	no_long_mode

	/* Keep the extended feature flags for the NX check further down */
	movl	%edx,%edi

	/*
	 * Prepare for entering 64bit mode
	 */

	/* Enable PAE mode and PGE */
	xorl	%eax, %eax
	btsl	$5, %eax	/* CR4.PAE */
	btsl	$7, %eax	/* CR4.PGE */
	movl	%eax, %cr4

	/* Setup early boot stage 4 level pagetables (init_level4_pgt,
	   loaded at physical 0x101000) */
	movl	$0x101000, %eax
	movl	%eax, %cr3

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	/* Fool rdmsr and reset %eax to avoid dependences */
	xorl	%eax, %eax
	/* Enable Long Mode */
	btsl	$_EFER_LME, %eax
	/* Enable System Call */
	btsl	$_EFER_SCE, %eax

	/* No Execute supported? (CPUID 80000001 EDX bit 20, saved in %edi) */
	btl	$20,%edi
	jnc	1f
	btsl	$_EFER_NX, %eax
1:
	/* Make changes effective */
	wrmsr

	xorl	%eax, %eax
	/* Enable paging and in turn activate Long Mode */
	btsl	$31, %eax
	/* Enable protected mode */
	btsl	$0, %eax
	/* Enable MP */
	btsl	$1, %eax
	/* Enable ET */
	btsl	$4, %eax
	/* Enable NE */
	btsl	$5, %eax
	/* Enable WP */
	btsl	$16, %eax
	/* Enable AM */
	btsl	$18, %eax
	/* Make changes effective */
	movl	%eax, %cr0
	jmp	reach_compatibility_mode
reach_compatibility_mode:

	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */

	testw	%bp,%bp		/* secondary CPU? (trampoline flag saved above) */
	jnz	second

	/* Load new GDT with the 64bit segment using 32bit descriptor */
	/* to avoid 32bit relocations we use fixed addresses here
	   (0x100F00 = pGDT32, 0x100F10 = ljumpvector below) */
	movl	$0x100F00, %eax
	lgdt	(%eax)

	movl	$0x100F10, %eax
	/* Finally jump in 64bit mode */
	ljmp	*(%eax)

second:
	/* abuse syscall to get into 64bit mode. this way we don't need
	   a working low identity mapping just for the short 32bit roundtrip.
	   XXX kludge. this should not be needed. */
	movl	$MSR_STAR,%ecx
	xorl	%eax,%eax
	movl	$(__USER32_CS<<16)|__KERNEL_CS,%edx
	wrmsr

	movl	$MSR_CSTAR,%ecx
	movl	$0xffffffff,%edx
	movl	$0x80100100,%eax	# reach_long64 absolute
	wrmsr
	syscall

	.code64
	.org 0x100
reach_long64:
	/* First 64bit code. Set up the initial kernel stack. */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr

	/*
	 * Setup up a dummy PDA. this is just for some early bootup code
	 * that does in_interrupt()
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx	/* MSR write takes the base split in %edx:%eax */
	wrmsr

	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on real kernel address
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump
	 */
	movq	initial_code(%rip),%rax
	jmp	*%rax

	/* SMP bootup changes these two */
	.globl	initial_code
initial_code:
	.quad	x86_64_start_kernel
	.globl init_rsp
init_rsp:
	.quad	init_thread_union+THREAD_SIZE-8

.code32
ENTRY(no_long_mode)
	/* This isn't an x86-64 CPU so hang */
1:
	jmp	1b

/* 32bit GDT pointer at the fixed address 0x100F00 used by startup_32 */
.org 0xf00
pGDT32:
	.word	gdt32_end-gdt_table32
	.long	gdt_table32-__START_KERNEL+0x100000

/* far-jump vector at the fixed address 0x100F10 used by startup_32 */
.org 0xf10
ljumpvector:
	.long	reach_long64-__START_KERNEL+0x100000
	.word	__KERNEL_CS

ENTRY(stext)
ENTRY(_stext)

	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
.org 0x1000
ENTRY(init_level4_pgt)
	.quad	0x0000000000102007		/* -> level3_ident_pgt */
	.fill	255,8,0
	.quad	0x000000000010a007		/* -> level3_physmem_pgt */
	.fill	254,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	0x0000000000103007		/* -> level3_kernel_pgt */

.org 0x2000
/* Kernel does not "know" about 4-th level of page tables. */
ENTRY(level3_ident_pgt)
	.quad	0x0000000000104007		/* -> level2_ident_pgt */
	.fill	511,8,0

.org 0x3000
ENTRY(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	0x0000000000105007		/* -> level2_kernel_pgt */
	.fill	1,8,0

.org 0x4000
ENTRY(level2_ident_pgt)
	/* 40MB for bootup, mapped with 2MB large-page PMD entries. */
	.quad	0x0000000000000283
	.quad	0x0000000000200183
	.quad	0x0000000000400183
	.quad	0x0000000000600183
	.quad	0x0000000000800183
	.quad	0x0000000000A00183
	.quad	0x0000000000C00183
	.quad	0x0000000000E00183
	.quad	0x0000000001000183
	.quad	0x0000000001200183
	.quad	0x0000000001400183
	.quad	0x0000000001600183
	.quad	0x0000000001800183
	.quad	0x0000000001A00183
	.quad	0x0000000001C00183
	.quad	0x0000000001E00183
	.quad	0x0000000002000183
	.quad	0x0000000002200183
	.quad	0x0000000002400183
	.quad	0x0000000002600183
	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
	.globl temp_boot_pmds
temp_boot_pmds:
	.fill	492,8,0
	
.org 0x5000
ENTRY(level2_kernel_pgt)
	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
	.quad	0x0000000000000183
	.quad	0x0000000000200183
	.quad	0x0000000000400183
	.quad	0x0000000000600183
	.quad	0x0000000000800183
	.quad	0x0000000000A00183
	.quad	0x0000000000C00183
	.quad	0x0000000000E00183
	.quad	0x0000000001000183
	.quad	0x0000000001200183
	.quad	0x0000000001400183
	.quad	0x0000000001600183
	.quad	0x0000000001800183
	.quad	0x0000000001A00183
	.quad	0x0000000001C00183
	.quad	0x0000000001E00183
	.quad	0x0000000002000183
	.quad	0x0000000002200183
	.quad	0x0000000002400183
	.quad	0x0000000002600183
	/* Module mapping starts here */
	.fill	492,8,0

/* One page each of placeholder pages at fixed offsets 0x6000-0x9000 */
.org 0x6000
ENTRY(empty_zero_page)

.org 0x7000
ENTRY(empty_bad_page)

.org 0x8000
ENTRY(empty_bad_pte_table)

.org 0x9000
ENTRY(empty_bad_pmd_table)

.org 0xa000
ENTRY(level3_physmem_pgt)
	.quad	0x0000000000105007		/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */

.org 0xb000
.data

	.align 16
	/* GDT pointer (limit + base) loaded by lgdt in reach_long64 */
	.globl cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table
gdt:
	.quad	cpu_gdt_table
#ifdef CONFIG_SMP
	/* one descriptor-pointer slot per additional CPU */
	.rept	NR_CPUS-1
	.word	0
	.quad	0
	.endr
#endif

/* Minimal 32bit-loadable GDT referenced by pGDT32 for the mode switch */
.align 64 /* cacheline aligned */
ENTRY(gdt_table32)
	.quad	0x0000000000000000	/* This one is magic */
	.quad	0x0000000000000000	/* unused */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
gdt32_end:	
	
/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout 
 */
.align 64 /* cacheline aligned, keep this synchronized with asm/desc.h */

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */

ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x0000000000000000	/* unused */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x00cffe000000ffff	/* __USER32_CS */
	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS  */
	.quad	0x00affa000000ffff	/* __USER_CS */
	.word	0xFFFF				# 4Gb - (0x100000*0x1000 = 4Gb)
	.word	0				# base address = 0
	.word	0x9A00				# code read/exec
	.word	0x00CF				# granularity = 4096, 386
						#  (+5th nibble of limit)
					/* __KERNEL32_CS */
	.quad	0,0			/* TSS */
	.quad	0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0,0			/* pad to cache line boundary */
gdt_end:
	.globl gdt_end
	/* GDTs of other CPUs: one cpu_gdt_table-sized area (11 quads) each */
#ifdef CONFIG_SMP
	.rept NR_CPUS-1
	.quad 0,0,0,0,0,0,0,0,0,0,0
	.endr
#endif

	.align  64
/* 256 IDT entries, 16 bytes (two quads) each, zero-initialized */
ENTRY(idt_table)	
	.rept   256
	.quad   0
	.quad 	0
	.endr