Commit be354f40 authored by Linus Torvalds

Revert "x86, mm: Include the entire kernel memory map in trampoline_pgd"

This reverts commit 53b87cf0.

It causes odd bootup problems on x86-64.  Markus Trippelsdorf gets a
repeatable oops, and I see a non-repeatable oops (or constant stream of
messages that scroll off too quickly to read) that seems to go away with
this commit reverted.

So we don't know exactly what is wrong with the commit, but it's
definitely problematic, and worth reverting sooner rather than later.
Bisected-by: Markus Trippelsdorf <markus@trippelsdorf.de>
Cc: H Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7a280cf5
...@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end) ...@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
for (address = start; address <= end; address += PGDIR_SIZE) { for (address = start; address <= end; address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address); const pgd_t *pgd_ref = pgd_offset_k(address);
struct page *page; struct page *page;
pgd_t *pgd;
if (pgd_none(*pgd_ref)) if (pgd_none(*pgd_ref))
continue; continue;
spin_lock(&pgd_lock); spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) { list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
spinlock_t *pgt_lock; spinlock_t *pgt_lock;
pgd = (pgd_t *)page_address(page) + pgd_index(address); pgd = (pgd_t *)page_address(page) + pgd_index(address);
...@@ -130,13 +130,6 @@ void sync_global_pgds(unsigned long start, unsigned long end) ...@@ -130,13 +130,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)
spin_unlock(pgt_lock); spin_unlock(pgt_lock);
} }
pgd = __va(real_mode_header->trampoline_pgd);
pgd += pgd_index(address);
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
spin_unlock(&pgd_lock); spin_unlock(&pgd_lock);
} }
} }
......
...@@ -50,107 +50,6 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, ...@@ -50,107 +50,6 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err; return err;
} }
#ifdef CONFIG_X86_64
/*
 * Mirror the kernel PTEs that map [vaddr, end) (under vpmd) into the
 * identity-mapping PTE page under ppmd, starting at paddr's slot.
 * NOTE(review): do-while executes at least once, so this assumes
 * vaddr != end on entry.  Also, paddr only selects the starting PTE;
 * it is not advanced with vaddr — correct only if paddr and vaddr are
 * congruent within the PMD span; TODO confirm (this commit was reverted).
 */
static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
			    pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
{
	pte_t *ppte = pte_offset_kernel(ppmd, paddr);	/* dest: identity map */
	pte_t *vpte = pte_offset_kernel(vpmd, vaddr);	/* src: kernel map */
	do {
		set_pte(ppte, *vpte);	/* copy one PTE verbatim */
	} while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end);
}
/*
 * Populate identity-map PMD entries covering [vaddr, end), allocating a
 * zeroed PTE page for any empty identity PMD slot, then mirroring the
 * kernel PTEs into it via ident_pte_range().
 * Returns 0 on success, 1 on page-allocation failure.
 * NOTE(review): paddr is not advanced per iteration (only vaddr is);
 * see ident_pte_range — TODO confirm intended.
 */
static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
			   pud_t *ppud, pud_t *vpud, unsigned long end)
{
	pmd_t *ppmd = pmd_offset(ppud, paddr);	/* dest: identity map */
	pmd_t *vpmd = pmd_offset(vpud, vaddr);	/* src: kernel map */
	unsigned long next;
	do {
		next = pmd_addr_end(vaddr, end);
		if (!pmd_present(*ppmd)) {
			/* Empty identity slot: hang a fresh zeroed PTE page off it. */
			pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL);
			if (!ppte)
				return 1;	/* OOM — caller propagates failure */
			set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte)));
		}
		ident_pte_range(paddr, vaddr, ppmd, vpmd, next);
	} while (ppmd++, vpmd++, vaddr = next, vaddr != end);
	return 0;
}
/*
 * Populate identity-map PUD entries covering [vaddr, end), allocating a
 * zeroed PMD page for any empty identity PUD slot, then recursing into
 * ident_pmd_range() for each PUD-sized chunk.
 * Returns 0 on success, 1 on failure (propagated from the PMD level or
 * local page-allocation failure).
 */
static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
			   pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
{
	pud_t *ppud = pud_offset(ppgd, paddr);	/* dest: identity map */
	pud_t *vpud = pud_offset(vpgd, vaddr);	/* src: kernel map */
	unsigned long next;
	do {
		next = pud_addr_end(vaddr, end);
		if (!pud_present(*ppud)) {
			/* Empty identity slot: hang a fresh zeroed PMD page off it. */
			pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!ppmd)
				return 1;	/* OOM */
			set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd)));
		}
		if (ident_pmd_range(paddr, vaddr, ppud, vpud, next))
			return 1;	/* propagate lower-level failure */
	} while (ppud++, vpud++, vaddr = next, vaddr != end);
	return 0;
}
/*
 * Mirror the kernel mapping of [vaddr, vaddr + size) into the real-mode
 * trampoline page table as an identity mapping rooted at paddr's PGD slot.
 * Returns 0 on success, 1 on failure (guard-hole overlap or allocation
 * failure at any page-table level).
 */
static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
				   unsigned long size)
{
	unsigned long end = vaddr + size;
	unsigned long next;
	pgd_t *vpgd, *ppgd;
	/*
	 * Don't map over the guard hole.  0x800000000000 is 1 << 47 — the
	 * top of the lower canonical half on x86-64; presumably anything at
	 * or above it would fall into the non-canonical hole.
	 * NOTE(review): paddr + size could wrap for extreme paddr values —
	 * TODO confirm callers bound it.
	 */
	if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
		return 1;
	/* The identity PGD lives in the real-mode trampoline page table. */
	ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);
	vpgd = pgd_offset_k(vaddr);	/* kernel-side source PGD */
	do {
		next = pgd_addr_end(vaddr, end);
		if (!pgd_present(*ppgd)) {
			/* Empty identity slot: hang a fresh zeroed PUD page off it. */
			pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL);
			if (!ppud)
				return 1;	/* OOM */
			set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud)));
		}
		if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next))
			return 1;	/* propagate lower-level failure */
	} while (ppgd++, vpgd++, vaddr = next, vaddr != end);
	return 0;
}
#else
/* Non-x86-64 stub: no trampoline identity mapping needed; always succeeds. */
static inline int insert_identity_mapping(resource_size_t paddr,
					  unsigned long vaddr,
					  unsigned long size)
{
	return 0;
}
#endif /* CONFIG_X86_64 */
/* /*
* Remap an arbitrary physical address space into the kernel virtual * Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses * address space. Needed when the kernel wants to access high addresses
...@@ -264,10 +163,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, ...@@ -264,10 +163,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
ret_addr = (void __iomem *) (vaddr + offset); ret_addr = (void __iomem *) (vaddr + offset);
mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
if (insert_identity_mapping(phys_addr, vaddr, size))
printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
(unsigned long long)phys_addr);
/* /*
* Check if the request spans more than any BAR in the iomem resource * Check if the request spans more than any BAR in the iomem resource
* tree. * tree.
......
...@@ -78,21 +78,8 @@ void __init setup_real_mode(void) ...@@ -78,21 +78,8 @@ void __init setup_real_mode(void)
*trampoline_cr4_features = read_cr4(); *trampoline_cr4_features = read_cr4();
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
/* trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
* Create an identity mapping for all of physical memory.
*/
for (i = 0; i <= pgd_index(max_pfn << PAGE_SHIFT); i++) {
int index = pgd_index(PAGE_OFFSET) + i;
trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[index]);
}
/*
* Copy the upper-half of the kernel pages tables.
*/
for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[i]);
#endif #endif
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment