Commit e1ad783b authored by Keith Packard, committed by Ingo Molnar

Revert "x86, efi: Calling __pa() with an ioremap()ed address is invalid"

This hangs my MacBook Air at boot time; I get no console
messages at all. I reverted this on top of -rc5 and my machine
boots again.

This reverts commit e8c71062.
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Signed-off-by: Keith Packard <keithp@keithp.com>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Cc: Matthew Garrett <mjg@redhat.com>
Cc: Zhang Rui <rui.zhang@intel.com>
Cc: Huang Ying <huang.ying.caritas@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1321621751-3650-1-git-send-email-matt@console
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6d3e32e6
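
The title of the reverted patch refers to the distinction between the kernel direct mapping, where __pa()/__va() translate addresses by a fixed offset, and ioremap() mappings, which live in the vmalloc area and must not be fed to __pa(). A minimal illustrative sketch of that distinction (not part of this commit; the helper name pa_vs_ioremap_sketch and its phys/size parameters are hypothetical):

	/*
	 * Illustration only: 'phys' and 'size' stand in for an EFI region's
	 * physical address and length, and 'phys' is assumed to be covered
	 * by the kernel direct mapping.
	 */
	static void __init pa_vs_ioremap_sketch(phys_addr_t phys, unsigned long size)
	{
		void __iomem *io = ioremap_cache(phys, size);	/* vmalloc-area VA   */
		void *direct = __va(phys);			/* direct-mapping VA */

		/* Valid: 'direct' lies in the direct mapping, so the offset
		 * arithmetic behind __pa() recovers the physical address. */
		WARN_ON(__pa(direct) != phys);

		/*
		 * Invalid: 'io' is not in the direct mapping, so __pa(io) would
		 * yield a bogus physical address. Keeping EFI runtime regions in
		 * the direct mapping so that __va()/__pa() stay usable is what
		 * the reverted commit attempted, and what this revert undoes.
		 */

		iounmap(io);
	}
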
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -53,13 +53,6 @@
  */
 #define E820_RESERVED_KERN        128
 
-/*
- * Address ranges that need to be mapped by the kernel direct
- * mapping. This is used to make sure regions such as
- * EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
- */
-#define E820_RESERVED_EFI         129
-
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 struct e820entry {
@@ -122,7 +115,6 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
 extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,6 +33,8 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
+#define efi_ioremap(addr, size, type)		ioremap_cache(addr, size)
+
 #else /* !CONFIG_X86_32 */
 
 extern u64 efi_call0(void *fp);
@@ -82,6 +84,9 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+				 u32 type);
+
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -135,7 +135,6 @@ static void __init e820_print_type(u32 type)
 		printk(KERN_CONT "(usable)");
 		break;
 	case E820_RESERVED:
-	case E820_RESERVED_EFI:
 		printk(KERN_CONT "(reserved)");
 		break;
 	case E820_ACPI:
@@ -784,7 +783,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 /*
  * Find the highest page frame number we have available
  */
-unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
 {
 	int i;
 	unsigned long last_pfn = 0;
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
-	unsigned long end_pfn;
-
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();
@@ -934,24 +932,7 @@
 	init_gbpages();
 
 	/* max_pfn_mapped is updated here */
-	end_pfn = max_low_pfn;
-
-#ifdef CONFIG_X86_64
-	/*
-	 * There may be regions after the last E820_RAM region that we
-	 * want to include in the kernel direct mapping, such as
-	 * EFI_RUNTIME_SERVICES_DATA.
-	 */
-	if (efi_enabled) {
-		unsigned long efi_end;
-
-		efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
-		if (efi_end > max_low_pfn)
-			end_pfn = efi_end;
-	}
-#endif
-
-	max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
+	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 	max_pfn_mapped = max_low_pfn_mapped;
 
 #ifdef CONFIG_X86_64
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -323,13 +323,10 @@ static void __init do_add_efi_memmap(void)
 		case EFI_UNUSABLE_MEMORY:
 			e820_type = E820_UNUSABLE;
 			break;
-		case EFI_RUNTIME_SERVICES_DATA:
-			e820_type = E820_RESERVED_EFI;
-			break;
 		default:
 			/*
 			 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
-			 * EFI_MEMORY_MAPPED_IO
+			 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
 			 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
 			 */
 			e820_type = E820_RESERVED;
@@ -674,21 +671,10 @@ void __init efi_enter_virtual_mode(void)
 		end_pfn = PFN_UP(end);
 		if (end_pfn <= max_low_pfn_mapped
 		    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-			&& end_pfn <= max_pfn_mapped)) {
+			&& end_pfn <= max_pfn_mapped))
 			va = __va(md->phys_addr);
-
-			if (!(md->attribute & EFI_MEMORY_WB)) {
-				addr = (u64) (unsigned long)va;
-				npages = md->num_pages;
-				memrange_efi_to_native(&addr, &npages);
-				set_memory_uc(addr, npages);
-			}
-		} else {
-			if (!(md->attribute & EFI_MEMORY_WB))
-				va = ioremap_nocache(md->phys_addr, size);
-			else
-				va = ioremap_cache(md->phys_addr, size);
-		}
+		else
+			va = efi_ioremap(md->phys_addr, size, md->type);
 
 		md->virt_addr = (u64) (unsigned long) va;
 
@@ -698,6 +684,13 @@ void __init efi_enter_virtual_mode(void)
 			continue;
 		}
 
+		if (!(md->attribute & EFI_MEMORY_WB)) {
+			addr = md->virt_addr;
+			npages = md->num_pages;
+			memrange_efi_to_native(&addr, &npages);
+			set_memory_uc(addr, npages);
+		}
+
 		systab = (u64) (unsigned long) efi_phys.systab;
 		if (md->phys_addr <= systab && systab < end) {
 			systab += md->virt_addr - md->phys_addr;
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -80,3 +80,20 @@ void __init efi_call_phys_epilog(void)
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
 }
+
+void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
+				 u32 type)
+{
+	unsigned long last_map_pfn;
+
+	if (type == EFI_MEMORY_MAPPED_IO)
+		return ioremap(phys_addr, size);
+
+	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
+
+	return (void __iomem *)__va(phys_addr);
+}