Commit d42b3a29 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 EFI update from Peter Anvin:
 "EFI tree, from Matt Fleming.  Most of the patches are the new efivarfs
  filesystem by Matt Garrett & co.  The balance are support for EFI
  wallclock in the absence of a hardware-specific driver, and various
  fixes and cleanups."

* 'core-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  efivarfs: Make efivarfs_fill_super() static
  x86, efi: Check table header length in efi_bgrt_init()
  efivarfs: Use query_variable_info() to limit kmalloc()
  efivarfs: Fix return value of efivarfs_file_write()
  efivarfs: Return a consistent error when efivarfs_get_inode() fails
  efivarfs: Make 'datasize' unsigned long
  efivarfs: Add unique magic number
  efivarfs: Replace magic number with sizeof(attributes)
  efivarfs: Return an error if we fail to read a variable
  efi: Clarify GUID length calculations
  efivarfs: Implement exclusive access for {get,set}_variable
  efivarfs: efivarfs_fill_super() ensure we clean up correctly on error
  efivarfs: efivarfs_fill_super() ensure we free our temporary name
  efivarfs: efivarfs_fill_super() fix inode reference counts
  efivarfs: efivarfs_create() ensure we drop our reference on inode on error
  efivarfs: efivarfs_file_read ensure we free data in error paths
  x86-64/efi: Use EFI to deal with platform wall clock (again)
  x86/kernel: remove tboot 1:1 page table creation code
  x86, efi: 1:1 pagetable mapping for virtual EFI calls
  x86, mm: Include the entire kernel memory map in trampoline_pgd
  ...
parents 18dd0bf2 e83af1f1
...@@ -38,6 +38,8 @@ dnotify_test.c ...@@ -38,6 +38,8 @@ dnotify_test.c
- example program for dnotify - example program for dnotify
ecryptfs.txt ecryptfs.txt
- docs on eCryptfs: stacked cryptographic filesystem for Linux. - docs on eCryptfs: stacked cryptographic filesystem for Linux.
efivarfs.txt
- info for the efivarfs filesystem.
exofs.txt exofs.txt
- info, usage, mount options, design about EXOFS. - info, usage, mount options, design about EXOFS.
ext2.txt ext2.txt
......
efivarfs - a (U)EFI variable filesystem
The efivarfs filesystem was created to address the shortcomings of
using entries in sysfs to maintain EFI variables. The old sysfs EFI
variables code only supported variables of up to 1024 bytes. This
limitation existed in version 0.99 of the EFI specification, but was
removed before any full releases. Since variables can now be larger
than a single page, sysfs isn't the best interface for this.
Variables can be created, deleted and modified with the efivarfs
filesystem.
efivarfs is typically mounted like this:
mount -t efivarfs none /sys/firmware/efi/efivars
...@@ -69,23 +69,37 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3, ...@@ -69,23 +69,37 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \ efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5), (u64)(a6)) (u64)(a4), (u64)(a5), (u64)(a6))
/*
 * Page-table switch hooks for virtual EFI runtime calls:
 * efi_call_virt_prelog() saves the current CR3 and installs the
 * trampoline page tables (which carry a 1:1 physical mapping needed by
 * some firmware); efi_call_virt_epilog() restores the saved CR3.
 */
extern unsigned long efi_call_virt_prelog(void);
extern void efi_call_virt_epilog(unsigned long);
/*
 * efi_callx(x, func, ...) - invoke the x-argument efi_call##x() thunk
 * with the trampoline page tables in place for the duration of the
 * firmware call.  GCC statement-expression; evaluates to the
 * efi_status_t returned by the firmware.
 */
#define efi_callx(x, func, ...) \
({ \
efi_status_t __status; \
unsigned long __pgd; \
\
__pgd = efi_call_virt_prelog(); \
__status = efi_call##x(func, __VA_ARGS__); \
efi_call_virt_epilog(__pgd); \
__status; \
})
#define efi_call_virt0(f) \ #define efi_call_virt0(f) \
efi_call0((void *)(efi.systab->runtime->f)) efi_callx(0, (void *)(efi.systab->runtime->f))
#define efi_call_virt1(f, a1) \ #define efi_call_virt1(f, a1) \
efi_call1((void *)(efi.systab->runtime->f), (u64)(a1)) efi_callx(1, (void *)(efi.systab->runtime->f), (u64)(a1))
#define efi_call_virt2(f, a1, a2) \ #define efi_call_virt2(f, a1, a2) \
efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2)) efi_callx(2, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3) \ #define efi_call_virt3(f, a1, a2, a3) \
efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ efi_callx(3, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3)) (u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4) \ #define efi_call_virt4(f, a1, a2, a3, a4) \
efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ efi_callx(4, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4)) (u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \ #define efi_call_virt5(f, a1, a2, a3, a4, a5) \
efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ efi_callx(5, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5)) (u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \ #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \ efi_callx(6, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6)) (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
......
...@@ -103,71 +103,13 @@ void __init tboot_probe(void) ...@@ -103,71 +103,13 @@ void __init tboot_probe(void)
pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
} }
/* Root of the dedicated 1:1 page tables built for the tboot shutdown path. */
static pgd_t *tboot_pg_dir;

/*
 * Minimal mm_struct used purely as a container for allocating the tboot
 * page tables (pud_alloc/pmd_alloc/pte_alloc_map take an mm).
 * NOTE(review): the lock/list initializers name init_mm's members —
 * this appears copied from init_mm's definition; confirm intentional.
 */
static struct mm_struct tboot_mm = {
.mm_rb = RB_ROOT,
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
};
static inline void switch_to_tboot_pt(void) static inline void switch_to_tboot_pt(void)
{ {
write_cr3(virt_to_phys(tboot_pg_dir)); #ifdef CONFIG_X86_32
} load_cr3(initial_page_table);
#else
static int map_tboot_page(unsigned long vaddr, unsigned long pfn, write_cr3(real_mode_header->trampoline_pgd);
pgprot_t prot) #endif
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset(&tboot_mm, vaddr);
pud = pud_alloc(&tboot_mm, pgd, vaddr);
if (!pud)
return -1;
pmd = pmd_alloc(&tboot_mm, pud, vaddr);
if (!pmd)
return -1;
pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
if (!pte)
return -1;
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
pte_unmap(pte);
return 0;
}
/*
 * Map @nr consecutive page frames, beginning at @start_pfn, at virtual
 * address @vaddr in the tboot page tables.  Allocates tboot_pg_dir as a
 * side effect.  Returns 0 on success, -1 if any allocation or mapping
 * step fails.
 */
static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
			   unsigned long nr)
{
	unsigned long pfn = start_pfn;

	/* Reuse the original kernel mapping */
	tboot_pg_dir = pgd_alloc(&tboot_mm);
	if (!tboot_pg_dir)
		return -1;

	while (nr--) {
		if (map_tboot_page(vaddr, pfn, PAGE_KERNEL_EXEC))
			return -1;
		vaddr += PAGE_SIZE;
		pfn++;
	}

	return 0;
}
static void tboot_create_trampoline(void)
{
u32 map_base, map_size;
/* Create identity map for tboot shutdown code. */
map_base = PFN_DOWN(tboot->tboot_base);
map_size = PFN_UP(tboot->tboot_size);
if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
map_base, map_size);
} }
#ifdef CONFIG_ACPI_SLEEP #ifdef CONFIG_ACPI_SLEEP
...@@ -225,14 +167,6 @@ void tboot_shutdown(u32 shutdown_type) ...@@ -225,14 +167,6 @@ void tboot_shutdown(u32 shutdown_type)
if (!tboot_enabled()) if (!tboot_enabled())
return; return;
/*
* if we're being called before the 1:1 mapping is set up then just
* return and let the normal shutdown happen; this should only be
* due to very early panic()
*/
if (!tboot_pg_dir)
return;
/* if this is S3 then set regions to MAC */ /* if this is S3 then set regions to MAC */
if (shutdown_type == TB_SHUTDOWN_S3) if (shutdown_type == TB_SHUTDOWN_S3)
if (tboot_setup_sleep()) if (tboot_setup_sleep())
...@@ -343,8 +277,6 @@ static __init int tboot_late_init(void) ...@@ -343,8 +277,6 @@ static __init int tboot_late_init(void)
if (!tboot_enabled()) if (!tboot_enabled())
return 0; return 0;
tboot_create_trampoline();
atomic_set(&ap_wfs_count, 0); atomic_set(&ap_wfs_count, 0);
register_hotcpu_notifier(&tboot_cpu_notifier); register_hotcpu_notifier(&tboot_cpu_notifier);
......
...@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end) ...@@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
for (address = start; address <= end; address += PGDIR_SIZE) { for (address = start; address <= end; address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address); const pgd_t *pgd_ref = pgd_offset_k(address);
struct page *page; struct page *page;
pgd_t *pgd;
if (pgd_none(*pgd_ref)) if (pgd_none(*pgd_ref))
continue; continue;
spin_lock(&pgd_lock); spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) { list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
spinlock_t *pgt_lock; spinlock_t *pgt_lock;
pgd = (pgd_t *)page_address(page) + pgd_index(address); pgd = (pgd_t *)page_address(page) + pgd_index(address);
...@@ -130,6 +130,13 @@ void sync_global_pgds(unsigned long start, unsigned long end) ...@@ -130,6 +130,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
spin_unlock(pgt_lock); spin_unlock(pgt_lock);
} }
pgd = __va(real_mode_header->trampoline_pgd);
pgd += pgd_index(address);
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
spin_unlock(&pgd_lock); spin_unlock(&pgd_lock);
} }
} }
......
...@@ -50,6 +50,107 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size, ...@@ -50,6 +50,107 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err; return err;
} }
#ifdef CONFIG_X86_64
/*
 * Copy the kernel's live PTEs covering [vaddr, end) into the identity
 * (physical-address-indexed) page table, one 4K entry per iteration.
 * Runs at least once, matching the callers' do/while convention.
 */
static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
			    pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
{
	pte_t *phys_pte = pte_offset_kernel(ppmd, paddr);
	pte_t *virt_pte = pte_offset_kernel(vpmd, vaddr);

	for (;;) {
		set_pte(phys_pte, *virt_pte);
		phys_pte++;
		virt_pte++;
		vaddr += PAGE_SIZE;
		if (vaddr == end)
			break;
	}
}
/*
 * Mirror the kernel mapping of [vaddr, end) into the identity page
 * table at PMD granularity, allocating a zeroed PTE page for any PMD
 * slot not yet present.  Returns 0 on success, 1 on allocation failure.
 */
static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
			   pud_t *ppud, pud_t *vpud, unsigned long end)
{
	pmd_t *phys_pmd = pmd_offset(ppud, paddr);
	pmd_t *virt_pmd = pmd_offset(vpud, vaddr);
	unsigned long boundary;

	for (;;) {
		boundary = pmd_addr_end(vaddr, end);
		if (!pmd_present(*phys_pmd)) {
			/* Populate the empty PMD slot with a fresh PTE page. */
			pte_t *pte_page = (pte_t *)get_zeroed_page(GFP_KERNEL);
			if (!pte_page)
				return 1;
			set_pmd(phys_pmd, __pmd(_KERNPG_TABLE | __pa(pte_page)));
		}
		ident_pte_range(paddr, vaddr, phys_pmd, virt_pmd, boundary);
		phys_pmd++;
		virt_pmd++;
		vaddr = boundary;
		if (vaddr == end)
			break;
	}
	return 0;
}
/*
 * Mirror the kernel mapping of [vaddr, end) into the identity page
 * table at PUD granularity, allocating a zeroed PMD page for any PUD
 * slot not yet present, then descending via ident_pmd_range().
 * Returns 0 on success, 1 on any allocation failure.
 */
static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
			   pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
{
	pud_t *phys_pud = pud_offset(ppgd, paddr);
	pud_t *virt_pud = pud_offset(vpgd, vaddr);
	unsigned long boundary;

	for (;;) {
		boundary = pud_addr_end(vaddr, end);
		if (!pud_present(*phys_pud)) {
			/* Populate the empty PUD slot with a fresh PMD page. */
			pmd_t *pmd_page = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!pmd_page)
				return 1;
			set_pud(phys_pud, __pud(_KERNPG_TABLE | __pa(pmd_page)));
		}
		if (ident_pmd_range(paddr, vaddr, phys_pud, virt_pud, boundary))
			return 1;
		phys_pud++;
		virt_pud++;
		vaddr = boundary;
		if (vaddr == end)
			break;
	}
	return 0;
}
/*
 * Insert an identity (1:1) mapping for the physical range
 * [paddr, paddr + size) into the trampoline page table, mirroring the
 * kernel's own mapping of [vaddr, vaddr + size).  Intermediate PUD
 * pages are allocated on demand.  Returns 0 on success, 1 on failure
 * (range rejected or allocation failed).
 */
static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
				   unsigned long size)
{
	unsigned long end = vaddr + size;
	unsigned long boundary;
	pgd_t *virt_pgd, *phys_pgd;

	/* Don't map over the guard hole. */
	if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
		return 1;

	phys_pgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);
	virt_pgd = pgd_offset_k(vaddr);

	for (;;) {
		boundary = pgd_addr_end(vaddr, end);
		if (!pgd_present(*phys_pgd)) {
			/* Populate the empty PGD slot with a fresh PUD page. */
			pud_t *pud_page = (pud_t *)get_zeroed_page(GFP_KERNEL);
			if (!pud_page)
				return 1;
			set_pgd(phys_pgd, __pgd(_KERNPG_TABLE | __pa(pud_page)));
		}
		if (ident_pud_range(paddr, vaddr, phys_pgd, virt_pgd, boundary))
			return 1;
		phys_pgd++;
		virt_pgd++;
		vaddr = boundary;
		if (vaddr == end)
			break;
	}
	return 0;
}
#else
/*
 * !CONFIG_X86_64: no trampoline identity page table is maintained on
 * this path, so there is nothing to insert; report success so
 * __ioremap_caller() proceeds without warning.
 */
static inline int insert_identity_mapping(resource_size_t paddr,
unsigned long vaddr,
unsigned long size)
{
return 0;
}
#endif /* CONFIG_X86_64 */
/* /*
* Remap an arbitrary physical address space into the kernel virtual * Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses * address space. Needed when the kernel wants to access high addresses
...@@ -163,6 +264,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, ...@@ -163,6 +264,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
ret_addr = (void __iomem *) (vaddr + offset); ret_addr = (void __iomem *) (vaddr + offset);
mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
if (insert_identity_mapping(phys_addr, vaddr, size))
printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
(unsigned long long)phys_addr);
/* /*
* Check if the request spans more than any BAR in the iomem resource * Check if the request spans more than any BAR in the iomem resource
* tree. * tree.
......
...@@ -919,11 +919,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, ...@@ -919,11 +919,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/* /*
* On success we use clflush, when the CPU supports it to * On success we use clflush, when the CPU supports it to
* avoid the wbindv. If the CPU does not support it and in the * avoid the wbindv. If the CPU does not support it, in the
* error case we fall back to cpa_flush_all (which uses * error case, and during early boot (for EFI) we fall back
* wbindv): * to cpa_flush_all (which uses wbinvd):
*/ */
if (!ret && cpu_has_clflush) { if (early_boot_irqs_disabled)
__cpa_flush_all((void *)(long)cache);
else if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache, cpa_flush_array(addr, numpages, cache,
cpa.flags, pages); cpa.flags, pages);
......
...@@ -39,6 +39,8 @@ void efi_bgrt_init(void) ...@@ -39,6 +39,8 @@ void efi_bgrt_init(void)
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
return; return;
if (bgrt_tab->header.length < sizeof(*bgrt_tab))
return;
if (bgrt_tab->version != 1) if (bgrt_tab->version != 1)
return; return;
if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
......
...@@ -239,22 +239,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map( ...@@ -239,22 +239,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status; return status;
} }
static efi_status_t __init phys_efi_get_time(efi_time_t *tm, static int efi_set_rtc_mmss(unsigned long nowtime)
efi_time_cap_t *tc)
{
unsigned long flags;
efi_status_t status;
spin_lock_irqsave(&rtc_lock, flags);
efi_call_phys_prelog();
status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
virt_to_phys(tc));
efi_call_phys_epilog();
spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
int efi_set_rtc_mmss(unsigned long nowtime)
{ {
int real_seconds, real_minutes; int real_seconds, real_minutes;
efi_status_t status; efi_status_t status;
...@@ -283,7 +268,7 @@ int efi_set_rtc_mmss(unsigned long nowtime) ...@@ -283,7 +268,7 @@ int efi_set_rtc_mmss(unsigned long nowtime)
return 0; return 0;
} }
unsigned long efi_get_time(void) static unsigned long efi_get_time(void)
{ {
efi_status_t status; efi_status_t status;
efi_time_t eft; efi_time_t eft;
...@@ -639,18 +624,13 @@ static int __init efi_runtime_init(void) ...@@ -639,18 +624,13 @@ static int __init efi_runtime_init(void)
} }
/* /*
* We will only need *early* access to the following * We will only need *early* access to the following
* two EFI runtime services before set_virtual_address_map * EFI runtime service before set_virtual_address_map
* is invoked. * is invoked.
*/ */
efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
efi_phys.set_virtual_address_map = efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *) (efi_set_virtual_address_map_t *)
runtime->set_virtual_address_map; runtime->set_virtual_address_map;
/*
* Make efi_get_time can be called before entering
* virtual mode.
*/
efi.get_time = phys_efi_get_time;
early_iounmap(runtime, sizeof(efi_runtime_services_t)); early_iounmap(runtime, sizeof(efi_runtime_services_t));
return 0; return 0;
...@@ -736,12 +716,10 @@ void __init efi_init(void) ...@@ -736,12 +716,10 @@ void __init efi_init(void)
efi_enabled = 0; efi_enabled = 0;
return; return;
} }
#ifdef CONFIG_X86_32
if (efi_is_native()) { if (efi_is_native()) {
x86_platform.get_wallclock = efi_get_time; x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss; x86_platform.set_wallclock = efi_set_rtc_mmss;
} }
#endif
#if EFI_DEBUG #if EFI_DEBUG
print_efi_memmap(); print_efi_memmap();
......
...@@ -58,6 +58,21 @@ static void __init early_code_mapping_set_exec(int executable) ...@@ -58,6 +58,21 @@ static void __init early_code_mapping_set_exec(int executable)
} }
} }
/*
 * Install the trampoline page tables (which include a 1:1 physical
 * mapping) ahead of a virtual EFI runtime call.  Returns the previous
 * CR3 value, to be handed back to efi_call_virt_epilog() afterwards.
 */
unsigned long efi_call_virt_prelog(void)
{
	unsigned long prev_cr3 = read_cr3();

	write_cr3(real_mode_header->trampoline_pgd);
	return prev_cr3;
}
/* Restore the CR3 value saved by efi_call_virt_prelog(). */
void efi_call_virt_epilog(unsigned long saved)
{
write_cr3(saved);
}
void __init efi_call_phys_prelog(void) void __init efi_call_phys_prelog(void)
{ {
unsigned long vaddress; unsigned long vaddress;
......
...@@ -78,8 +78,21 @@ void __init setup_real_mode(void) ...@@ -78,8 +78,21 @@ void __init setup_real_mode(void)
*trampoline_cr4_features = read_cr4(); *trampoline_cr4_features = read_cr4();
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd); trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE; /*
* Create an identity mapping for all of physical memory.
*/
for (i = 0; i <= pgd_index(max_pfn << PAGE_SHIFT); i++) {
int index = pgd_index(PAGE_OFFSET) + i;
trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[index]);
}
/*
* Copy the upper-half of the kernel pages tables.
*/
for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[i]);
#endif #endif
} }
......
This diff is collapsed.
...@@ -29,7 +29,12 @@ ...@@ -29,7 +29,12 @@
#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1))) #define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1))) #define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1))) #define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
typedef unsigned long efi_status_t; typedef unsigned long efi_status_t;
typedef u8 efi_bool_t; typedef u8 efi_bool_t;
...@@ -582,8 +587,6 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); ...@@ -582,8 +587,6 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void); extern int __init efi_uart_console_only (void);
extern void efi_initialize_iomem_resources(struct resource *code_resource, extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource); struct resource *data_resource, struct resource *bss_resource);
extern unsigned long efi_get_time(void);
extern int efi_set_rtc_mmss(unsigned long nowtime);
extern void efi_reserve_boot_services(void); extern void efi_reserve_boot_services(void);
extern struct efi_memory_map memmap; extern struct efi_memory_map memmap;
...@@ -729,6 +732,7 @@ struct efivars { ...@@ -729,6 +732,7 @@ struct efivars {
spinlock_t lock; spinlock_t lock;
struct list_head list; struct list_head list;
struct kset *kset; struct kset *kset;
struct kobject *kobject;
struct bin_attribute *new_var, *del_var; struct bin_attribute *new_var, *del_var;
const struct efivar_operations *ops; const struct efivar_operations *ops;
struct efivar_entry *walk_entry; struct efivar_entry *walk_entry;
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#define ISOFS_SUPER_MAGIC 0x9660 #define ISOFS_SUPER_MAGIC 0x9660
#define JFFS2_SUPER_MAGIC 0x72b6 #define JFFS2_SUPER_MAGIC 0x72b6
#define PSTOREFS_MAGIC 0x6165676C #define PSTOREFS_MAGIC 0x6165676C
#define EFIVARFS_MAGIC 0xde5e81e4
#define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */ #define MINIX_SUPER_MAGIC 0x137F /* minix v1 fs, 14 char names */
#define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ #define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */
......
...@@ -463,6 +463,10 @@ static void __init mm_init(void) ...@@ -463,6 +463,10 @@ static void __init mm_init(void)
percpu_init_late(); percpu_init_late();
pgtable_cache_init(); pgtable_cache_init();
vmalloc_init(); vmalloc_init();
#ifdef CONFIG_X86
if (efi_enabled)
efi_enter_virtual_mode();
#endif
} }
asmlinkage void __init start_kernel(void) asmlinkage void __init start_kernel(void)
...@@ -603,10 +607,6 @@ asmlinkage void __init start_kernel(void) ...@@ -603,10 +607,6 @@ asmlinkage void __init start_kernel(void)
calibrate_delay(); calibrate_delay();
pidmap_init(); pidmap_init();
anon_vma_init(); anon_vma_init();
#ifdef CONFIG_X86
if (efi_enabled)
efi_enter_virtual_mode();
#endif
thread_info_cache_init(); thread_info_cache_init();
cred_init(); cred_init();
fork_init(totalram_pages); fork_init(totalram_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment