Commit 9f5a8d61 authored by Greg Kroah-Hartman

Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame numbers"

This reverts commit 87e2bd89 which is
commit edc3b912 upstream.

Turns out there were too many other issues with this patch to make it
viable for the stable tree.
Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Jones <davej@codemonkey.org.uk>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: linux-efi@vger.kernel.org
Cc: Ingo Molnar <mingo@kernel.org>
Cc: "Ghannam, Yazen" <Yazen.Ghannam@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 34933c2c
...@@ -911,10 +911,15 @@ static void populate_pte(struct cpa_data *cpa, ...@@ -911,10 +911,15 @@ static void populate_pte(struct cpa_data *cpa,
pte = pte_offset_kernel(pmd, start); pte = pte_offset_kernel(pmd, start);
while (num_pages-- && start < end) { while (num_pages-- && start < end) {
set_pte(pte, pfn_pte(cpa->pfn, pgprot));
/* deal with the NX bit */
if (!(pgprot_val(pgprot) & _PAGE_NX))
cpa->pfn &= ~_PAGE_NX;
set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
start += PAGE_SIZE; start += PAGE_SIZE;
cpa->pfn++; cpa->pfn += PAGE_SIZE;
pte++; pte++;
} }
} }
...@@ -970,11 +975,11 @@ static int populate_pmd(struct cpa_data *cpa, ...@@ -970,11 +975,11 @@ static int populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start); pmd = pmd_offset(pud, start);
set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
massage_pgprot(pmd_pgprot))); massage_pgprot(pmd_pgprot)));
start += PMD_SIZE; start += PMD_SIZE;
cpa->pfn += PMD_SIZE >> PAGE_SHIFT; cpa->pfn += PMD_SIZE;
cur_pages += PMD_SIZE >> PAGE_SHIFT; cur_pages += PMD_SIZE >> PAGE_SHIFT;
} }
...@@ -1043,11 +1048,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, ...@@ -1043,11 +1048,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
* Map everything starting from the Gb boundary, possibly with 1G pages * Map everything starting from the Gb boundary, possibly with 1G pages
*/ */
while (end - start >= PUD_SIZE) { while (end - start >= PUD_SIZE) {
set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
massage_pgprot(pud_pgprot))); massage_pgprot(pud_pgprot)));
start += PUD_SIZE; start += PUD_SIZE;
cpa->pfn += PUD_SIZE >> PAGE_SHIFT; cpa->pfn += PUD_SIZE;
cur_pages += PUD_SIZE >> PAGE_SHIFT; cur_pages += PUD_SIZE >> PAGE_SHIFT;
pud++; pud++;
} }
......
...@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void) ...@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{ {
unsigned long pfn, text; unsigned long text;
struct page *page; struct page *page;
unsigned npages; unsigned npages;
pgd_t *pgd; pgd_t *pgd;
...@@ -160,8 +160,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) ...@@ -160,8 +160,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
* and ident-map those pages containing the map before calling * and ident-map those pages containing the map before calling
* phys_efi_set_virtual_address_map(). * phys_efi_set_virtual_address_map().
*/ */
pfn = pa_memmap >> PAGE_SHIFT; if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap); pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1; return 1;
} }
...@@ -186,9 +185,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) ...@@ -186,9 +185,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
npages = (_end - _text) >> PAGE_SHIFT; npages = (_end - _text) >> PAGE_SHIFT;
text = __pa(_text); text = __pa(_text);
pfn = text >> PAGE_SHIFT;
if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) { if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
pr_err("Failed to map kernel text 1:1\n"); pr_err("Failed to map kernel text 1:1\n");
return 1; return 1;
} }
...@@ -206,14 +204,12 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) ...@@ -206,14 +204,12 @@ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
static void __init __map_region(efi_memory_desc_t *md, u64 va) static void __init __map_region(efi_memory_desc_t *md, u64 va)
{ {
pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
unsigned long flags = 0; unsigned long pf = 0;
unsigned long pfn;
if (!(md->attribute & EFI_MEMORY_WB)) if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD; pf |= _PAGE_PCD;
pfn = md->phys_addr >> PAGE_SHIFT; if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n", pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
md->phys_addr, va); md->phys_addr, va);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment