Commit 65ade2f8 authored by Kirill A. Shutemov, committed by Ingo Molnar

x86/boot/64: Rename init_level4_pgt and early_level4_pgt

With CONFIG_X86_5LEVEL=y, level 4 is no longer the top level of the page tables.

Let's give these variables more generic names: init_top_pgt and
early_top_pgt.
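
For context, the rename is essentially mechanical; arch-independent code keeps
reaching the table through swapper_pg_dir, which the x86-64 header simply
aliases to the renamed symbol. A minimal sketch of the resulting naming pattern
(the declarations match the diff below; the init_mm line shows the usual
generic-code usage and is included only for illustration):

    /* sketch: the symbols this patch renames, and how generic code reaches them */
    extern pgd_t early_top_pgt[PTRS_PER_PGD];   /* page tables used during early boot   */
    extern pgd_t init_top_pgt[];                /* the kernel's initial top-level table */
    #define swapper_pg_dir init_top_pgt         /* name generic code continues to use   */

    /* generic code (mm/init-mm.c, unchanged by this patch) does roughly:
     *     struct mm_struct init_mm = { .pgd = swapper_pg_dir, ... };
     */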
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-9-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c88d7150
@@ -922,7 +922,7 @@ extern pgd_t trampoline_pgd_entry;
 static inline void __meminit init_trampoline_default(void)
 {
 	/* Default trampoline pgd value */
-	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
 }
 # ifdef CONFIG_RANDOMIZE_MEMORY
 void __meminit init_trampoline(void);
...
@@ -20,9 +20,9 @@ extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
 extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
-#define swapper_pg_dir init_level4_pgt
+extern pgd_t init_top_pgt[];
+#define swapper_pg_dir init_top_pgt
 extern void paging_init(void);
...
@@ -125,7 +125,7 @@ void __init init_espfix_bsp(void)
 	p4d_t *p4d;
 	/* Install the espfix pud into the kernel page directory */
-	pgd = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
 	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
 	p4d_populate(&init_mm, p4d, espfix_pud_page);
...
@@ -33,7 +33,7 @@
 /*
  * Manage page tables very early on.
  */
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
@@ -67,7 +67,7 @@ void __init __startup_64(unsigned long physaddr)
 	/* Fixup the physical addresses in the page table */
-	pgd = fixup_pointer(&early_level4_pgt, physaddr);
+	pgd = fixup_pointer(&early_top_pgt, physaddr);
 	pgd[pgd_index(__START_KERNEL_map)] += load_delta;
 	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
@@ -124,9 +124,9 @@ void __init __startup_64(unsigned long physaddr)
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
-	memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
+	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
 	next_early_pgt = 0;
-	write_cr3(__pa_nodebug(early_level4_pgt));
+	write_cr3(__pa_nodebug(early_top_pgt));
 }
 /* Create a new PMD entry */
@@ -138,12 +138,11 @@ int __init early_make_pgtable(unsigned long address)
 	pmdval_t pmd, *pmd_p;
 	/* Invalid address or early pgt is done ? */
-	if (physaddr >= MAXMEM ||
-	    read_cr3_pa() != __pa_nodebug(early_level4_pgt))
+	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
 		return -1;
 again:
-	pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
+	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
 	pgd = *pgd_p;
 	/*
@@ -240,7 +239,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	clear_bss();
-	clear_page(init_level4_pgt);
+	clear_page(init_top_pgt);
 	kasan_early_init();
@@ -255,8 +254,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	 */
 	load_ucode_bsp();
-	/* set init_level4_pgt kernel high mapping*/
-	init_level4_pgt[511] = early_level4_pgt[511];
+	/* set init_top_pgt kernel high mapping*/
+	init_top_pgt[511] = early_top_pgt[511];
 	x86_64_start_reservations(real_mode_data);
 }
...
@@ -77,7 +77,7 @@ startup_64:
 	call	__startup_64
 	popq	%rsi
-	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(early_top_pgt - __START_KERNEL_map), %rax
 	jmp 1f
 ENTRY(secondary_startup_64)
 	/*
@@ -97,7 +97,7 @@ ENTRY(secondary_startup_64)
 	/* Sanitize CPU configuration */
 	call verify_cpu
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(init_top_pgt - __START_KERNEL_map), %rax
 1:
 	/* Enable PAE mode and PGE */
@@ -328,7 +328,7 @@ GLOBAL(name)
 	.endr
 	__INITDATA
-NEXT_PAGE(early_level4_pgt)
+NEXT_PAGE(early_top_pgt)
 	.fill	511,8,0
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -338,14 +338,14 @@ NEXT_PAGE(early_dynamic_pgts)
 	.data
 #ifndef CONFIG_XEN
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
 	.fill	512,8,0
 #else
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
 	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
+	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
 	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
-	.org	init_level4_pgt + L4_START_KERNEL*8, 0
+	.org	init_top_pgt + L4_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
...
@@ -347,7 +347,7 @@ void machine_kexec(struct kimage *image)
 void arch_crash_save_vmcoreinfo(void)
 {
 	VMCOREINFO_NUMBER(phys_base);
-	VMCOREINFO_SYMBOL(init_level4_pgt);
+	VMCOREINFO_SYMBOL(init_top_pgt);
 #ifdef CONFIG_NUMA
 	VMCOREINFO_SYMBOL(node_data);
...
@@ -431,7 +431,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 				       bool checkwx)
 {
 #ifdef CONFIG_X86_64
-	pgd_t *start = (pgd_t *) &init_level4_pgt;
+	pgd_t *start = (pgd_t *) &init_top_pgt;
 #else
 	pgd_t *start = swapper_pg_dir;
 #endif
...
@@ -12,7 +12,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 static int __init map_range(struct range *range)
@@ -109,8 +109,8 @@ void __init kasan_early_init(void)
 	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
-	kasan_map_early_shadow(early_level4_pgt);
-	kasan_map_early_shadow(init_level4_pgt);
+	kasan_map_early_shadow(early_top_pgt);
+	kasan_map_early_shadow(init_top_pgt);
 }
 void __init kasan_init(void)
@@ -121,8 +121,8 @@ void __init kasan_init(void)
 	register_die_notifier(&kasan_die_notifier);
 #endif
-	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
-	load_cr3(early_level4_pgt);
+	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+	load_cr3(early_top_pgt);
 	__flush_tlb_all();
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -148,7 +148,7 @@ void __init kasan_init(void)
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
-	load_cr3(init_level4_pgt);
+	load_cr3(init_top_pgt);
 	__flush_tlb_all();
 	/*
...
@@ -102,7 +102,7 @@ static void __init setup_real_mode(void)
 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
 	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-	trampoline_pgd[511] = init_level4_pgt[511].pgd;
+	trampoline_pgd[511] = init_top_pgt[511].pgd;
 #endif
 }
...
@@ -1465,8 +1465,8 @@ static void xen_write_cr3(unsigned long cr3)
  * At the start of the day - when Xen launches a guest, it has already
  * built pagetables for the guest. We diligently look over them
  * in xen_setup_kernel_pagetable and graft as appropriate them in the
- * init_level4_pgt and its friends. Then when we are happy we load
- * the new init_level4_pgt - and continue on.
+ * init_top_pgt and its friends. Then when we are happy we load
+ * the new init_top_pgt - and continue on.
  *
  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
  * up the rest of the pagetables. When it has completed it loads the cr3.
@@ -1909,12 +1909,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	pt_end = pt_base + xen_start_info->nr_pt_frames;
 	/* Zap identity mapping */
-	init_level4_pgt[0] = __pgd(0);
+	init_top_pgt[0] = __pgd(0);
 	/* Pre-constructed entries are in pfn, so convert to mfn */
 	/* L4[272] -> level3_ident_pgt */
 	/* L4[511] -> level3_kernel_pgt */
-	convert_pfn_mfn(init_level4_pgt);
+	convert_pfn_mfn(init_top_pgt);
 	/* L3_i[0] -> level2_ident_pgt */
 	convert_pfn_mfn(level3_ident_pgt);
@@ -1945,10 +1945,10 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Copy the initial P->M table mappings if necessary. */
 	i = pgd_index(xen_start_info->mfn_list);
 	if (i && i < pgd_index(__START_KERNEL_map))
-		init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
 	/* Make pagetable pieces RO */
-	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
@@ -1959,7 +1959,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Pin down new L4 */
 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
+			  PFN_DOWN(__pa_symbol(init_top_pgt)));
 	/* Unpin Xen-provided one */
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1969,7 +1969,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	 * attach it to, so make sure we just set kernel pgd.
 	 */
 	xen_mc_batch();
-	__xen_write_cr3(true, __pa(init_level4_pgt));
+	__xen_write_cr3(true, __pa(init_top_pgt));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 	/* We can't that easily rip out L3 and L2, as the Xen pagetables are
...
@@ -87,7 +87,7 @@ ENTRY(pvh_start_xen)
 	wrmsr
 	/* Enable pre-constructed page tables. */
-	mov $_pa(init_level4_pgt), %eax
+	mov $_pa(init_top_pgt), %eax
 	mov %eax, %cr3
 	mov $(X86_CR0_PG | X86_CR0_PE), %eax
 	mov %eax, %cr0
...