Commit b233969e authored by Ingo Molnar's avatar Ingo Molnar


Merge branch 'x86/untangle2' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen into x86/headers

Conflicts:
	arch/x86/include/asm/page.h
	arch/x86/include/asm/pgtable.h
	arch/x86/mach-voyager/voyager_smp.c
	arch/x86/mm/fault.c
parents d040c161 54321d94
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
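As a quick illustration of what these three definitions give you (a stand-alone user-space sketch, not kernel code; in plain C, _AC(1,UL) simply expands to 1UL):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xC0123ABCUL;	/* made-up address */

	/* Round down to the start of the containing 4 KiB page. */
	printf("page base: %#lx\n", addr & PAGE_MASK);	/* 0xc0123000 */
	/* Byte offset within that page. */
	printf("offset:    %#lx\n", addr & ~PAGE_MASK);	/* 0xabc */
	return 0;
}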
#ifdef __KERNEL__
-#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#include <asm/page_types.h>
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
   virtual addresses are 32 bits wide but physical addresses are larger
   (i.e., 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
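The interplay of these masks is easiest to see in isolation. A hedged user-space sketch of the PAE case follows; the constants mirror the header, the pte value is made up, and the (signed long) cast only actually matters on a 32-bit build, where long is 32 bits wide (on an LP64 host it is a no-op with the same result):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;	/* PAE: page-table entries are 64-bit */
typedef uint64_t phys_addr_t;

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define __PHYSICAL_MASK_SHIFT	44
#define __PHYSICAL_MASK		((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)

/* The (signed long) cast sign-extends a 32-bit PAGE_MASK to 64 bits,
 * so the high physical-address bits survive the AND. */
#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)
#define PTE_FLAGS_MASK		(~PTE_PFN_MASK)

int main(void)
{
	pteval_t pte = 0x0000000123456067ULL;	/* made-up PAE pte */

	/* pfn: 0x123456, flags: 0x67 */
	printf("pfn:   %#llx\n",
	       (unsigned long long)((pte & PTE_PFN_MASK) >> PAGE_SHIFT));
	printf("flags: %#llx\n",
	       (unsigned long long)(pte & PTE_FLAGS_MASK));
	return 0;
}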
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
@@ -44,28 +11,8 @@
#include <asm/page_32.h>
#endif /* CONFIG_X86_64 */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifndef __ASSEMBLY__
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
struct page;
static inline void clear_user_page(void *page, unsigned long vaddr,
@@ -84,114 +31,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
static inline pgd_t native_make_pgd(pgdval_t val)
{
return (pgd_t) { val };
}
static inline pgdval_t native_pgd_val(pgd_t pgd)
{
return pgd.pgd;
}
static inline pgdval_t pgd_flags(pgd_t pgd)
{
return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pudval_t val)
{
return (pud_t) { val };
}
static inline pudval_t native_pud_val(pud_t pud)
{
return pud.pud;
}
#else /* PAGETABLE_LEVELS == 3 */
#include <asm-generic/pgtable-nopud.h>
static inline pudval_t native_pud_val(pud_t pud)
{
return native_pgd_val(pud.pgd);
}
#endif /* PAGETABLE_LEVELS == 4 */
static inline pudval_t pud_flags(pud_t pud)
{
return native_pud_val(pud) & PTE_FLAGS_MASK;
}
typedef struct { pmdval_t pmd; } pmd_t;
static inline pmd_t native_make_pmd(pmdval_t val)
{
return (pmd_t) { val };
}
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return pmd.pmd;
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
static inline pmdval_t native_pmd_val(pmd_t pmd)
{
return native_pgd_val(pmd.pud.pgd);
}
#endif /* PAGETABLE_LEVELS >= 3 */
static inline pmdval_t pmd_flags(pmd_t pmd)
{
return native_pmd_val(pmd) & PTE_FLAGS_MASK;
}
static inline pte_t native_make_pte(pteval_t val)
{
return (pte_t) { .pte = val };
}
static inline pteval_t native_pte_val(pte_t pte)
{
return pte.pte;
}
static inline pteval_t pte_flags(pte_t pte)
{
return native_pte_val(pte) & PTE_FLAGS_MASK;
}
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#endif /* CONFIG_PARAVIRT */
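These single-field structs exist purely so the compiler can tell a pte from a raw integer at zero runtime cost. A minimal stand-alone sketch of the same pattern (simplified to a 64-bit pteval_t, demo value invented):

#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;	/* distinct type, same size */

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

int main(void)
{
	pte_t pte = native_make_pte(0x123456067ULL);
	pteval_t raw = native_pte_val(pte);

	/* pte = raw;  -- would not compile: pte_t is not pteval_t */
	return raw == 0x123456067ULL ? 0 : 1;
}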
#define __pa(x) __phys_addr((unsigned long)(x))
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
......
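Underneath, __phys_addr() on the common path is plain linear arithmetic against the direct map. A toy illustration with the usual 32-bit PAGE_OFFSET (values assumed for the example, not taken from this diff):

#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL	/* 32-bit default: kernel mapped at 3 GiB */

#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)		/* virt -> phys */
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))	/* phys -> virt */

int main(void)
{
	/* Kernel text mapped at 3 GiB + 1 MiB lives at physical 1 MiB. */
	printf("%#lx\n", __pa(0xC0100000UL));	/* prints 0x100000 */
	return 0;
}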
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H
-/*
+#include <asm/page_32_types.h>
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
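(The arithmetic behind the 44: a PFN kept in a 32-bit unsigned long can name 2^32 pages, each page spans 2^12 bytes, so the largest physical address still expressible as a PFN is 2^(32+12) = 2^44, about 16 TiB.)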
#ifndef __ASSEMBLY__
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* __ASSEMBLY__ */
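The union view is what lets 32-bit code store a 64-bit PAE entry half at a time. A small stand-alone sketch (uint32_t halves stand in for the kernel's unsigned long; x86 little-endian assumed):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

typedef union {
	struct {
		uint32_t pte_low, pte_high;	/* little-endian layout */
	};
	pteval_t pte;
} pte_t;

int main(void)
{
	pte_t p;

	p.pte_low  = 0x23456067;	/* low pfn bits + flags */
	p.pte_high = 0x00000001;	/* pfn bits above bit 31 */
	printf("%#llx\n", (unsigned long long)p.pte);	/* 0x123456067 */
	return 0;
}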
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#ifndef __ASSEMBLY__
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
@@ -89,22 +19,7 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
-extern int nx_enabled;
+#ifndef __ASSEMBLY__
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
......
#ifndef _ASM_X86_PAGE_32_DEFS_H
#define _ASM_X86_PAGE_32_DEFS_H
#include <linux/const.h>
/*
* This handles the memory map.
*
* A __PAGE_OFFSET of 0xC0000000 means that the kernel has
* a virtual address space of one gigabyte, which limits the
* amount of physical memory you can use to about 950MB.
*
* If you want more physical memory than this then see the CONFIG_HIGHMEM4G
* and CONFIG_HIGHMEM64G options in the kernel configuration.
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER 0
#else
#define THREAD_ORDER 1
#endif
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT 44
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#endif /* CONFIG_X86_PAE */
#ifndef __ASSEMBLY__
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
*/
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_DEFS_H */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
-#define PAGETABLE_LEVELS 4
+#include <asm/page_64_types.h>
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
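(The "hw limit: 7" refers to the Interrupt Stack Table in the 64-bit TSS, which has seven slots; the five stacks above each claim one IST slot, leaving two spare.)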
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
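For illustration, the arithmetic behind the constant:

/*
 *   0xffff800000000000	most negative canonical address (bit 47 set,
 *			sign-extended through bit 63), i.e. pgd slot 256
 * + 16 * (1UL << 39)	sixteen 512 GiB pgd slots = 0x0000080000000000
 * = 0xffff880000000000	__PAGE_OFFSET, i.e. pgd slot 272 (256 + 16)
 */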
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
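(With the common default of CONFIG_PHYSICAL_START = 0x200000, an assumption since it is a config knob, the alignment check above passes trivially and __START_KERNEL evaluates to 0xffffffff80000000 + 0x200000 = 0xffffffff80200000.)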
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
/*
* These are used to make use of C type-checking..
*/
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct page *pgtable_t;
typedef struct { pteval_t pte; } pte_t;
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#endif /* _ASM_X86_PAGE_64_H */
***************
*** 1,105 ****
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
- #define PAGETABLE_LEVELS 4
-
- #define THREAD_ORDER 1
- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
- #define CURRENT_MASK (~(THREAD_SIZE - 1))
-
- #define EXCEPTION_STACK_ORDER 0
- #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
- #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
- #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
- #define IRQSTACK_ORDER 2
- #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
- #define STACKFAULT_STACK 1
- #define DOUBLEFAULT_STACK 2
- #define NMI_STACK 3
- #define DEBUG_STACK 4
- #define MCE_STACK 5
- #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-
- #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
- #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-
- /*
- * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
- */
- #define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-
- #define __PHYSICAL_START CONFIG_PHYSICAL_START
- #define __KERNEL_ALIGN 0x200000
-
- /*
- * Make sure kernel is aligned to 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
- */
- #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
- #error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
- #endif
-
- #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
- #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-
- /* See Documentation/x86_64/mm.txt for a description of the memory map. */
- #define __PHYSICAL_MASK_SHIFT 46
- #define __VIRTUAL_MASK_SHIFT 48
-
- /*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
- */
- #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
- #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
-
- #ifndef __ASSEMBLY__
- void clear_page(void *page);
- void copy_page(void *to, void *from);
-
- /* duplicated to the one in bootmem.h */
- extern unsigned long max_pfn;
- extern unsigned long phys_base;
-
- extern unsigned long __phys_addr(unsigned long);
- #define __phys_reloc_hide(x) (x)
-
- /*
- * These are used to make use of C type-checking..
- */
- typedef unsigned long pteval_t;
- typedef unsigned long pmdval_t;
- typedef unsigned long pudval_t;
- typedef unsigned long pgdval_t;
- typedef unsigned long pgprotval_t;
-
- typedef struct page *pgtable_t;
-
- typedef struct { pteval_t pte; } pte_t;
-
- #define vmemmap ((struct page *)VMEMMAP_START)
-
- extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
-
- extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
- extern void free_initmem(void);
-
- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
- #endif /* !__ASSEMBLY__ */
-
- #ifdef CONFIG_FLATMEM
- #define pfn_valid(pfn) ((pfn) < max_pfn)
- #endif
-
#endif /* _ASM_X86_PAGE_64_H */
--- 1,6 ----
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
+ #include <asm/page_64_types.h>
#endif /* _ASM_X86_PAGE_64_H */
#ifndef _ASM_X86_PAGE_64_DEFS_H
#define _ASM_X86_PAGE_64_DEFS_H
#define PAGETABLE_LEVELS 4
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQ_STACK_ORDER 2
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
/*
* Set __PAGE_OFFSET to the most negative possible address +
* PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
* hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
* what Xen requires.
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
/*
* Make sure kernel is aligned to 2MB address. Catching it at compile
* time is better. Change your config file and compile the kernel
* for a 2MB aligned address (CONFIG_PHYSICAL_START)
*/
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48
/*
* Kernel image size is limited to 512 MB (see level2_kernel_pgt in
* arch/x86/kernel/head_64.S), and it is mapped here:
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);
/* duplicate of the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x) (x)
#define vmemmap ((struct page *)VMEMMAP_START)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern void free_initmem(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Cast PAGE_MASK to a signed type so that it is sign-extended if
   virtual addresses are 32 bits wide but physical addresses are larger
   (i.e., 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE 2
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
#include <asm/page_32_types.h>
#endif /* CONFIG_X86_64 */
#ifndef __ASSEMBLY__
struct pgprot;
extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_DEFS_H */
@@ -4,7 +4,7 @@
 * para-virtualization: those hooks are defined here. */
#ifdef CONFIG_PARAVIRT
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
#include <asm/asm.h>
/* Bitmask of what can be clobbered: usually at least eax. */
......
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef union {
pteval_t pte;
pteval_t pte_low;
} pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
......
#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef union {
struct {
unsigned long pte_low, pte_high;
};
pteval_t pte;
} pte_t;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PARAVIRT
#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
#else
@@ -25,4 +42,5 @@
 */
#define PTRS_PER_PTE 512
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H
+#include <asm/pgtable_32_types.h>
/*
 * The Linux memory management assumes a three-level page table setup. On
@@ -33,47 +34,6 @@ void paging_init(void);
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* implements both the traditional 2-level x86 page tables and the
* newer 3-level PAE-mode page tables.
*/
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
& PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
/*
 * Define this if things work differently on an i386 and an i486:
......
#ifndef _ASM_X86_PGTABLE_32_DEFS_H
#define _ASM_X86_PGTABLE_32_DEFS_H
/*
* The Linux x86 paging architecture is 'compile-time dual-mode', it
* implements both the traditional 2-level x86 page tables and the
* newer 3-level PAE-mode page tables.
*/
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level_types.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level_types.h>
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif
#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
& PMD_MASK)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
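For a feel of the numbers, plugging in the usual defaults (assumed here, since both are config knobs: CONFIG_PAGE_OFFSET = 0xC0000000 and a 128 MiB __VMALLOC_RESERVE): the kernel's 1 GiB window minus 128 MiB for vmalloc, minus the pkmap and fixmap pages at the very top, puts MAXMEM a little under 896 MiB, the familiar 32-bit lowmem ceiling that the "about 950MB" comment in the old page_32.h was approximating.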
@@ -2,6 +2,8 @@
#define _ASM_X86_PGTABLE_64_H
#include <linux/const.h>
+#include <asm/pgtable_64_types.h>
#ifndef __ASSEMBLY__
/*
@@ -25,32 +27,6 @@ extern void paging_init(void);
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512
/*
* 3rd level page
*/
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512
/*
* PMD_SHIFT determines the size of the area a middle-level
* page table can map
*/
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512
/*
* entries per page directory level
*/
#define PTRS_PER_PTE 512
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
@@ -130,26 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
	native_set_pgd(pgd, native_make_pgd(0));
}
#endif /* !__ASSEMBLY__ */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#ifndef __ASSEMBLY__
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
......
#ifndef _ASM_X86_PGTABLE_64_DEFS_H
#define _ASM_X86_PGTABLE_64_DEFS_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* These are used to make use of C type-checking..
*/
typedef unsigned long pteval_t;
typedef unsigned long pmdval_t;
typedef unsigned long pudval_t;
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
typedef struct { pteval_t pte; } pte_t;
#endif /* !__ASSEMBLY__ */
#define SHARED_KERNEL_PMD 0
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
#define PGDIR_SHIFT 39
#define PTRS_PER_PGD 512
/*
* 3rd level page
*/
#define PUD_SHIFT 30
#define PTRS_PER_PUD 512
/*
* PMD_SHIFT determines the size of the area a middle-level
* page table can map
*/
#define PMD_SHIFT 21
#define PTRS_PER_PMD 512
/*
* entries per page directory level
*/
#define PTRS_PER_PTE 512
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
#define VMALLOC_START _AC(0xffffc20000000000, UL)
#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffe20000000000, UL)
#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
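With 4 KiB pages and 512 entries per level, these shifts slice a 48-bit virtual address as 12 + 9 + 9 + 9 + 9 bits, which is exactly __VIRTUAL_MASK_SHIFT. A stand-alone sketch of the index extraction (illustrative only; the address is made up):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS_PER	512	/* 9 bits of index at every level */

int main(void)
{
	unsigned long addr = 0xffff880123456789UL;	/* made-up kernel address */

	printf("pgd index: %lu\n", (addr >> PGDIR_SHIFT) & (PTRS_PER - 1));
	printf("pud index: %lu\n", (addr >> PUD_SHIFT)   & (PTRS_PER - 1));
	printf("pmd index: %lu\n", (addr >> PMD_SHIFT)   & (PTRS_PER - 1));
	printf("pte index: %lu\n", (addr >> PAGE_SHIFT)  & (PTRS_PER - 1));
	printf("offset:    %#lx\n", addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}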
@@ -16,6 +16,7 @@ struct mm_struct;
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
+#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
......
@@ -1747,10 +1747,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
	int cpu = smp_processor_id();
	switch_to_new_gdt(cpu);
-	cpu_online_map = cpumask_of_cpu(smp_processor_id());
-	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
-	cpu_callin_map = CPU_MASK_NONE;
-	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+	cpu_set(cpu, cpu_online_map);
+	cpu_set(cpu, cpu_callout_map);
+	cpu_set(cpu, cpu_possible_map);
+	cpu_set(cpu, cpu_present_map);
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
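The substance of the change above: assigning a whole cpumask overwrites bits that other CPUs may already have set, while cpu_set() is additive. A toy single-word illustration (the real kernel cpumask is wider and its bitops are atomic; this sketch is neither):

#include <stdio.h>

typedef struct { unsigned long bits; } cpumask_t;	/* toy: at most 64 CPUs */

#define cpumask_of_cpu(cpu)	((cpumask_t){ 1UL << (cpu) })
#define cpu_set(cpu, mask)	((mask).bits |= 1UL << (cpu))	/* additive */

int main(void)
{
	cpumask_t cpu_online_map = { 0x1 };	/* CPU 0 is already online */

	cpu_set(2, cpu_online_map);		/* 0x5: CPU 0 preserved */
	printf("%#lx\n", cpu_online_map.bits);

	cpu_online_map = cpumask_of_cpu(2);	/* 0x4: CPU 0 lost */
	printf("%#lx\n", cpu_online_map.bits);
	return 0;
}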
......
@@ -851,6 +851,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
		return;
	}
+	/* kprobes don't want to hook the spurious faults. */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
......