Commit f2d3efed authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86_64: Implement early DMI scanning

There are more and more cases where we need to know DMI information
early to work around bugs.  i386 already had early DMI scanning, but
x86-64 didn't.  Implement this now.

This required some cleanup in the i386 code.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f083a329
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <linux/dmi.h> #include <linux/dmi.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/dmi.h>
static char * __init dmi_string(struct dmi_header *dm, u8 s) static char * __init dmi_string(struct dmi_header *dm, u8 s)
{ {
......
...@@ -68,6 +68,7 @@ ...@@ -68,6 +68,7 @@
#include <asm/swiotlb.h> #include <asm/swiotlb.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/gart-mapping.h> #include <asm/gart-mapping.h>
#include <asm/dmi.h>
/* /*
* Machine setup.. * Machine setup..
...@@ -92,6 +93,12 @@ int bootloader_type; ...@@ -92,6 +93,12 @@ int bootloader_type;
unsigned long saved_video_mode; unsigned long saved_video_mode;
/*
* Early DMI memory
*/
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];
/* /*
* Setup options * Setup options
*/ */
...@@ -620,6 +627,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -620,6 +627,8 @@ void __init setup_arch(char **cmdline_p)
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT)); init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
dmi_scan_machine();
zap_low_mappings(0); zap_low_mappings(0);
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
...@@ -1412,10 +1421,3 @@ struct seq_operations cpuinfo_op = { ...@@ -1412,10 +1421,3 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo, .show = show_cpuinfo,
}; };
/*
 * Late DMI scan, run as a core initcall.
 * NOTE(review): this hunk is the code the patch removes — the scan now
 * happens directly from setup_arch() (see the dmi_scan_machine() call
 * added earlier in this diff), early enough for boot-time workarounds.
 */
static int __init run_dmi_scan(void)
{
/* dmi_scan_machine() parses the SMBIOS/DMI tables; always succeeds here. */
dmi_scan_machine();
return 0;
}
core_initcall(run_dmi_scan);
...@@ -225,6 +225,33 @@ static __meminit void unmap_low_page(int i) ...@@ -225,6 +225,33 @@ static __meminit void unmap_low_page(int i)
ti->allocated = 0; ti->allocated = 0;
} }
/* Must run before zap_low_mappings */
/*
 * early_ioremap - map a physical address range very early in boot,
 * before the regular ioremap machinery is usable.
 *
 * Reuses the two temp_mappings PMD slots: it maps the 2MB large page
 * containing @addr into slot 0 and the next consecutive large page
 * into slot 1, so a range of up to LARGE_PAGE_SIZE bytes is covered
 * even when it straddles a large-page boundary.
 *
 * Returns a virtual pointer to @addr, or NULL if @size is too large
 * for the two-slot window.  Pair with early_iounmap().
 */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
/* Base of the 2MB large page containing addr. */
unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
/* The two slots cover up to 2*LARGE_PAGE_SIZE bytes, but because addr
   may sit anywhere inside the first page, only LARGE_PAGE_SIZE bytes
   are guaranteed; reject anything bigger. */
if (size >= LARGE_PAGE_SIZE) {
printk("SMBIOS area too long %lu\n", size);
return NULL;
}
/* Install both consecutive 2MB mappings (PSE large-page PMD entries). */
set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
map += LARGE_PAGE_SIZE;
set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
/* Flush before use so stale translations are not seen. */
__flush_tlb();
/* Offset of addr within the first large page, relative to slot 0. */
return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}
/* To avoid virtual aliases later */
/*
 * early_iounmap - tear down a mapping created by early_ioremap().
 *
 * @addr must be a pointer previously returned by early_ioremap()
 * (i.e. it must fall inside the temp_mappings[0] large page); @size is
 * currently unused.  Clears both temporary PMD slots and flushes the
 * TLB so no aliases of the physical range survive.
 */
__init void early_iounmap(void *addr, unsigned long size)
{
/* Sanity check: the pointer must round down to slot 0's base. */
if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
printk("early_iounmap: bad address %p\n", addr);
/* Clear both slots unconditionally (mapping is torn down regardless). */
set_pmd(temp_mappings[0].pmd, __pmd(0));
set_pmd(temp_mappings[1].pmd, __pmd(0));
__flush_tlb();
}
static void __meminit static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end) phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{ {
......
#ifndef _ASM_DMI_H
#define _ASM_DMI_H 1
#include <asm/io.h>
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap bt_ioremap
#define dmi_iounmap bt_iounmap
#define dmi_alloc alloc_bootmem
#endif
#ifndef _ASM_DMI_H
#define _ASM_DMI_H 1
#include <asm/io.h>
extern void *dmi_ioremap(unsigned long addr, unsigned long size);
extern void dmi_iounmap(void *addr, unsigned long size);
#define DMI_MAX_DATA 2048
extern int dmi_alloc_index;
extern char dmi_alloc_data[DMI_MAX_DATA];
/*
 * This is so early that there is no good way to allocate dynamic
 * memory.  Hand out chunks of a statically reserved BSS array instead.
 *
 * @len: number of bytes requested.
 *
 * Returns a pointer into dmi_alloc_data, or NULL when the pool is
 * exhausted.  Memory is never freed.
 *
 * Fix vs. original: the old code did
 *     if ((dmi_alloc_index += len) > DMI_MAX_DATA) return NULL;
 * which advanced the index even on failure, so one oversized request
 * permanently poisoned the pool and every later allocation — however
 * small — also failed.  Now the index is only committed on success.
 * The check is also written to avoid the signed+unsigned overflow in
 * "idx + len" for huge @len values.
 */
static inline void *dmi_alloc(unsigned len)
{
	int idx = dmi_alloc_index;

	/* Fail without consuming pool space. */
	if (len > (unsigned)(DMI_MAX_DATA - idx))
		return NULL;
	dmi_alloc_index = idx + len;
	return dmi_alloc_data + idx;
}
#define dmi_ioremap early_ioremap
#define dmi_iounmap early_iounmap
#endif
...@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size) ...@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
return __ioremap(offset, size, 0); return __ioremap(offset, size, 0);
} }
extern void *early_ioremap(unsigned long addr, unsigned long size);
extern void early_iounmap(void *addr, unsigned long size);
/* /*
* This one maps high address device memory and turns off caching for that area. * This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining * it's useful if some control registers are in such an area and write combining
...@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size) ...@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr); extern void iounmap(volatile void __iomem *addr);
/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
/* /*
* ISA I/O bus memory addresses are 1:1 with the physical address. * ISA I/O bus memory addresses are 1:1 with the physical address.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment