Commit bf64f051 authored by Vasily Gorbik, committed by Heiko Carstens

s390/mem_detect: handle online memory limit just once

Introduce mem_detect_truncate() to cut any online memory ranges above
the established identity mapping size, so that mem_detect users do not
have to do it over and over again.
Suggested-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 22476f47
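
Before the diff, a minimal userspace sketch of what the new helper does, assuming a
simplified range array sorted by ascending start address in place of the real
mem_detect block list (struct range, online_blocks and truncate_blocks are
hypothetical stand-ins for illustration, not the kernel's mem_detect API):

/*
 * Minimal sketch of the truncation pass. The block list is assumed to be
 * sorted by ascending start address, which the early-exit scan relies on.
 */
#include <stdio.h>

struct range {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
};

static struct range online_blocks[] = {
	{ 0x00000000UL, 0x40000000UL },	/* [0G, 1G) */
	{ 0x40000000UL, 0x80000000UL },	/* [1G, 2G) */
	{ 0x80000000UL, 0xc0000000UL },	/* [2G, 3G) */
};
static int block_count = 3;

/* Same shape as the commit's loop: drop or clamp blocks above 'limit'. */
static void truncate_blocks(unsigned long limit)
{
	int i;

	for (i = 0; i < block_count; i++) {
		if (online_blocks[i].start >= limit) {
			/* Whole block above the limit: drop it and the rest. */
			block_count = i;
			break;
		} else if (online_blocks[i].end > limit) {
			/* Block straddles the limit: clamp it, drop the rest. */
			online_blocks[i].end = limit;
			block_count = i + 1;
			break;
		}
	}
}

int main(void)
{
	int i;

	truncate_blocks(0xa0000000UL);	/* identity mapping limit at 2.5G */
	for (i = 0; i < block_count; i++)
		printf("[%#lx, %#lx)\n",
		       online_blocks[i].start, online_blocks[i].end);
	return 0;	/* prints [0, 1G), [1G, 2G), [2G, 2.5G) */
}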
@@ -34,6 +34,7 @@ struct vmlinux_info {
 void startup_kernel(void);
 unsigned long detect_memory(unsigned long *safe_addr);
+void mem_detect_truncate(unsigned long limit);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
 unsigned long read_ipl_report(unsigned long safe_addr);
@@ -44,7 +45,7 @@ void print_missing_facilities(void);
 void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
 unsigned long get_random_base(unsigned long safe_addr);
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit);
+void setup_vmem(unsigned long asce_limit);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
 void error(char *m);
...
@@ -176,8 +176,6 @@ unsigned long get_random_base(unsigned long safe_addr)
 	unsigned long base_pos, max_pos, kernel_size;
 	int i;
 
-	memory_limit = min(memory_limit, ident_map_size);
-
 	/*
 	 * Avoid putting kernel in the end of physical memory
 	 * which kasan will use for shadow memory and early pgtable
...
@@ -171,3 +171,21 @@ unsigned long detect_memory(unsigned long *safe_addr)
 	return max_physmem_end;
 }
+
+void mem_detect_truncate(unsigned long limit)
+{
+	struct mem_detect_block *block;
+	int i;
+
+	for (i = 0; i < mem_detect.count; i++) {
+		block = __get_mem_detect_block_ptr(i);
+		if (block->start >= limit) {
+			mem_detect.count = i;
+			break;
+		} else if (block->end > limit) {
+			block->end = (u64)limit;
+			mem_detect.count = i + 1;
+			break;
+		}
+	}
+}
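
A note on the scan above: it relies on the block list being sorted by ascending
start address. The first block starting at or beyond the limit cuts the count and
ends the loop; a block that merely straddles the limit is clamped to it and kept.
For example, blocks [0, 2G) and [2G, 4G) with a 3G limit become [0, 2G) and [2G, 3G).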
@@ -305,6 +305,7 @@ void startup_kernel(void)
 	setup_ident_map_size(max_physmem_end);
 	setup_vmalloc_size();
 	asce_limit = setup_kernel_memory_layout();
+	mem_detect_truncate(ident_map_size);
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
 		random_lma = get_random_base(safe_addr);
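
Placement is the point of the patch: mem_detect_truncate() runs once
ident_map_size is final and before anything consumes the block list, so
get_random_base(), setup_vmem() and the kasan setup further down all see
pre-clamped ranges. That is what allows each of their private min()/limit
checks to be deleted.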
@@ -336,7 +337,7 @@ void startup_kernel(void)
 	 */
 	clear_bss_section();
 	handle_relocs(__kaslr_offset);
-	setup_vmem(ident_map_size, asce_limit);
+	setup_vmem(asce_limit);
 	copy_bootdata();
 
 	if (__kaslr_offset) {
...
@@ -39,7 +39,7 @@ static void boot_check_oom(void)
 		error("out of memory on boot\n");
 }
 
-static void pgtable_populate_init(unsigned long ident_map_size)
+static void pgtable_populate_init(void)
 {
 	unsigned long initrd_end;
 	unsigned long kernel_end;
@@ -51,7 +51,7 @@ static void pgtable_populate_init(unsigned long ident_map_size)
 		pgalloc_low = max(pgalloc_low, initrd_end);
 	}
 
-	pgalloc_end = round_down(min(ident_map_size, get_mem_detect_end()), PAGE_SIZE);
+	pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
 	pgalloc_pos = pgalloc_end;
 
 	boot_check_oom();
@@ -226,7 +226,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 	}
 }
 
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit)
+void setup_vmem(unsigned long asce_limit)
 {
 	unsigned long start, end;
 	unsigned long asce_type;
@@ -250,13 +250,10 @@ void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit)
 	 * To prevent creation of a large page at address 0 first map
 	 * the lowcore and create the identity mapping only afterwards.
 	 */
-	pgtable_populate_init(ident_map_size);
+	pgtable_populate_init();
 	pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
-	for_each_mem_detect_block(i, &start, &end) {
-		if (start >= ident_map_size)
-			break;
-		pgtable_populate(start, min(end, ident_map_size), POPULATE_ONE2ONE);
-	}
+	for_each_mem_detect_block(i, &start, &end)
+		pgtable_populate(start, end, POPULATE_ONE2ONE);
 	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
 			 POPULATE_ABS_LOWCORE);
 	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,
...
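
With the ranges pre-clamped, setup_vmem() can walk the blocks unconditionally
and pgtable_populate_init() no longer needs an ident_map_size argument to bound
the page table allocation area; both simplifications follow from the single
truncation pass at startup.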
@@ -600,7 +600,6 @@ static void __init setup_resources(void)
 
 static void __init setup_memory_end(void)
 {
-	memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
 	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
 	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
 }
...
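
The memblock_remove() call goes away for the same reason: memblock is populated
from the mem_detect ranges, which by this point never extend above
ident_map_size, so there is nothing left to trim.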
@@ -247,8 +247,6 @@ void __init kasan_early_init(void)
 	 * The rest [memsize, ident_map_size] if memsize < ident_map_size
 	 * could be mapped/unmapped dynamically later during memory hotplug.
 	 */
-	memsize = min(memsize, ident_map_size);
-
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
...
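
Likewise for kasan: memsize here is derived from the already-truncated
mem_detect ranges, so clamping it against ident_map_size again would be a no-op.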