Commit 1b8c78be authored by Yinghai Lu, committed by H. Peter Anvin

x86: Merge early_reserve_initrd for 32bit and 64bit

They are the same, so move them out of head32/64.c into setup.c.

We are using memblock, which handles overlapping ranges properly, so we
don't need the very early reservation just to hold the location; we only
need to make sure the initrd is reserved before memblock is used to find
free memory.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-15-git-send-email-yinghai@kernel.org
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 10054230
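On the point above that memblock "handles overlapping ranges properly": memblock coalesces overlapping reserved regions, so reserving the initrd range once early in setup_arch() is enough, and any later overlapping reservation of the same range is harmless. Below is a minimal userspace sketch of that coalescing behaviour; it is an illustrative model, not the kernel's memblock implementation, and the names (model_reserve, struct region) are hypothetical.

/*
 * Illustrative userspace model, NOT the kernel's memblock code: a tiny
 * region tracker that coalesces overlapping reservations, showing why a
 * second, overlapping reserve of the initrd range does not change the
 * accounting.
 */
#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, end; };		/* half-open: [base, end) */

static struct region reserved[16];
static int nr_reserved;

/*
 * Reserve [base, base + size), merging with one overlapping region if any
 * (simplified: the real memblock can also merge across multiple regions).
 */
static void model_reserve(uint64_t base, uint64_t size)
{
	uint64_t end = base + size;
	int i;

	for (i = 0; i < nr_reserved; i++) {
		if (base <= reserved[i].end && end >= reserved[i].base) {
			if (base < reserved[i].base)
				reserved[i].base = base;
			if (end > reserved[i].end)
				reserved[i].end = end;
			return;
		}
	}
	reserved[nr_reserved].base = base;
	reserved[nr_reserved].end  = end;
	nr_reserved++;
}

int main(void)
{
	/* "Early" reservation of a ramdisk at 16 MiB, 8 MiB long. */
	model_reserve(16 << 20, 8 << 20);
	/* A later, overlapping reservation of the same range is a no-op. */
	model_reserve(16 << 20, 8 << 20);

	printf("regions: %d, [%#llx, %#llx)\n", nr_reserved,
	       (unsigned long long)reserved[0].base,
	       (unsigned long long)reserved[0].end);
	return 0;
}

With coalescing like this, the only ordering constraint is the one the commit message states: the initrd range must be reserved before memblock is first used to hand out free memory.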
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -36,17 +36,6 @@ void __init i386_start_kernel(void)
 	memblock_reserve(__pa_symbol(&_text),
 			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* Reserve INITRD */
-	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-		/* Assume only end is not page aligned */
-		u64 ramdisk_image = boot_params.hdr.ramdisk_image;
-		u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
-		u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
-	}
-#endif
-
 	/* Call the subarch specific early setup function */
 	switch (boot_params.hdr.hardware_subarch) {
 	case X86_SUBARCH_MRST:
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -178,17 +178,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
 	memblock_reserve(__pa_symbol(&_text),
 			 __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
-#ifdef CONFIG_BLK_DEV_INITRD
-	/* Reserve INITRD */
-	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
-		/* Assume only end is not page aligned */
-		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
-		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
-		unsigned long ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-		memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
-	}
-#endif
-
 	reserve_ebda_region();
 
 	/*
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -360,6 +360,19 @@ static u64 __init get_mem_size(unsigned long limit_pfn)
 	return mapped_pages << PAGE_SHIFT;
 }
 
+static void __init early_reserve_initrd(void)
+{
+	/* Assume only end is not page aligned */
+	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
+	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
+	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
+
+	if (!boot_params.hdr.type_of_loader ||
+	    !ramdisk_image || !ramdisk_size)
+		return;		/* No initrd provided by bootloader */
+	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
+}
+
 static void __init reserve_initrd(void)
 {
 	/* Assume only end is not page aligned */
@@ -386,10 +399,6 @@ static void __init reserve_initrd(void)
 	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
 				PFN_DOWN(ramdisk_end))) {
 		/* All are mapped, easy case */
-		/*
-		 * don't need to reserve again, already reserved early
-		 * in i386_start_kernel
-		 */
 		initrd_start = ramdisk_image + PAGE_OFFSET;
 		initrd_end = initrd_start + ramdisk_size;
 		return;
@@ -400,6 +409,9 @@ static void __init reserve_initrd(void)
 	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
+static void __init early_reserve_initrd(void)
+{
+}
 static void __init reserve_initrd(void)
 {
 }
@@ -760,6 +772,8 @@ early_param("reservelow", parse_reservelow);
 
 void __init setup_arch(char **cmdline_p)
 {
+	early_reserve_initrd();
+
 #ifdef CONFIG_X86_32
 	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
 	visws_early_detect();