Commit af9d00e9 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman

powerpc/mm/radix: Create separate mappings for hot-plugged memory

To enable memory unplug without splitting kernel page table
mapping, we force the max mapping size to the LMB size. LMB
size is the unit in which hypervisor will do memory add/remove
operation.

Pseries systems support a max LMB size of 256MB. Hence on pseries,
we now end up mapping memory with 2M page size instead of 1G. To improve
that we want hypervisor to hint the kernel about the hotplug
memory range. That was added as part of

commit b6eca183 ("powerpc/kernel: Enables memory
hot-remove after reboot on pseries guests")

But PowerVM doesn't provide that hint yet. Once we get PowerVM
updated, we can then force the 2M mapping only to hot-pluggable
memory region using memblock_is_hotpluggable(). Till then
let's depend on LMB size for finding the mapping page size
for linear range.

With this change KVM guest will also be doing linear mapping with
2M page size.

The actual TLB benefit of mapping guest page table entries with
hugepage size can only be materialized if the partition scoped
entries are also using the same or higher page size. A guest using
1G hugetlbfs backing guest memory can have a performance impact with
the above change.
Signed-off-by: default avatarBharata B Rao <bharata@linux.ibm.com>
Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
[mpe: Fold in fix from Aneesh spotted by lkp@intel.com]
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200709131925.922266-5-aneesh.kumar@linux.ibm.com
parent d6d6ebfc
...@@ -82,6 +82,11 @@ extern unsigned int mmu_pid_bits; ...@@ -82,6 +82,11 @@ extern unsigned int mmu_pid_bits;
/* Base PID to allocate from */ /* Base PID to allocate from */
extern unsigned int mmu_base_pid; extern unsigned int mmu_base_pid;
/*
* memory block size used with radix translation.
*/
extern unsigned int __ro_after_init radix_mem_block_size;
#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4) #define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES (1ul << mmu_pid_bits) #define PRTB_ENTRIES (1ul << mmu_pid_bits)
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
#include <linux/string_helpers.h> #include <linux/string_helpers.h>
#include <linux/memory.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
...@@ -33,6 +34,7 @@ ...@@ -33,6 +34,7 @@
unsigned int mmu_pid_bits; unsigned int mmu_pid_bits;
unsigned int mmu_base_pid; unsigned int mmu_base_pid;
unsigned int radix_mem_block_size __ro_after_init;
static __ref void *early_alloc_pgtable(unsigned long size, int nid, static __ref void *early_alloc_pgtable(unsigned long size, int nid,
unsigned long region_start, unsigned long region_end) unsigned long region_start, unsigned long region_end)
...@@ -265,6 +267,7 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end) ...@@ -265,6 +267,7 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
static int __meminit create_physical_mapping(unsigned long start, static int __meminit create_physical_mapping(unsigned long start,
unsigned long end, unsigned long end,
unsigned long max_mapping_size,
int nid, pgprot_t _prot) int nid, pgprot_t _prot)
{ {
unsigned long vaddr, addr, mapping_size = 0; unsigned long vaddr, addr, mapping_size = 0;
...@@ -278,6 +281,8 @@ static int __meminit create_physical_mapping(unsigned long start, ...@@ -278,6 +281,8 @@ static int __meminit create_physical_mapping(unsigned long start,
int rc; int rc;
gap = next_boundary(addr, end) - addr; gap = next_boundary(addr, end) - addr;
if (gap > max_mapping_size)
gap = max_mapping_size;
previous_size = mapping_size; previous_size = mapping_size;
prev_exec = exec; prev_exec = exec;
...@@ -328,8 +333,9 @@ static void __init radix_init_pgtable(void) ...@@ -328,8 +333,9 @@ static void __init radix_init_pgtable(void)
/* We don't support slb for radix */ /* We don't support slb for radix */
mmu_slb_size = 0; mmu_slb_size = 0;
/* /*
* Create the linear mapping, using standard page size for now * Create the linear mapping
*/ */
for_each_memblock(memory, reg) { for_each_memblock(memory, reg) {
/* /*
...@@ -345,6 +351,7 @@ static void __init radix_init_pgtable(void) ...@@ -345,6 +351,7 @@ static void __init radix_init_pgtable(void)
WARN_ON(create_physical_mapping(reg->base, WARN_ON(create_physical_mapping(reg->base,
reg->base + reg->size, reg->base + reg->size,
radix_mem_block_size,
-1, PAGE_KERNEL)); -1, PAGE_KERNEL));
} }
...@@ -485,6 +492,57 @@ static int __init radix_dt_scan_page_sizes(unsigned long node, ...@@ -485,6 +492,57 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
return 1; return 1;
} }
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Flat device tree scan callback: find the LMB size the hypervisor
 * advertises via the ibm,lmb-size property of the
 * ibm,dynamic-reconfiguration-memory node.  Writes the result through
 * @data and returns 1 to stop the scan once the node has been seen.
 */
static int __init probe_memory_block_size(unsigned long node, const char *uname, int
					  depth, void *data)
{
	unsigned long *mem_block_size = data;
	const __be64 *prop;
	int len;

	/* The node of interest sits at depth 1 of the device tree. */
	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (prop && len >= sizeof(__be64))
		*mem_block_size = be64_to_cpup(prop);
	else
		/* No usable property in the device tree; use the minimum. */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	return 1;
}
/*
 * Memory block (LMB) size used for the radix linear mapping.  OPAL
 * (PowerNV) platforms always use 1GB; pseries guests use the
 * hypervisor-reported ibm,lmb-size, falling back to
 * MIN_MEMORY_BLOCK_SIZE when the device tree has no such property.
 */
static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * OPAL firmware feature is set by now. Hence we are ok
	 * to test OPAL feature.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		return 1UL * 1024 * 1024 * 1024;

	of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}
#else /* CONFIG_MEMORY_HOTPLUG */
/*
 * Without CONFIG_MEMORY_HOTPLUG there is no unplug to accommodate, so
 * the linear mapping always uses the 1GB mapping size.
 */
static unsigned long radix_memory_block_size(void)
{
	return 1UL << 30;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void __init radix__early_init_devtree(void) void __init radix__early_init_devtree(void)
{ {
int rc; int rc;
...@@ -493,17 +551,27 @@ void __init radix__early_init_devtree(void) ...@@ -493,17 +551,27 @@ void __init radix__early_init_devtree(void)
* Try to find the available page sizes in the device-tree * Try to find the available page sizes in the device-tree
*/ */
rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL); rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
if (rc != 0) /* Found */ if (!rc) {
goto found;
/* /*
* let's assume we have page 4k and 64k support * No page size details found in device tree.
* Let's assume we have page 4k and 64k support
*/ */
mmu_psize_defs[MMU_PAGE_4K].shift = 12; mmu_psize_defs[MMU_PAGE_4K].shift = 12;
mmu_psize_defs[MMU_PAGE_4K].ap = 0x0; mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
mmu_psize_defs[MMU_PAGE_64K].shift = 16; mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found: }
/*
* Max mapping size used when mapping pages. We don't use
* ppc_md.memory_block_size() here because this gets called
* early, before the machine probe has run. Also, the pseries
* implementation only checks for ibm,lmb-size. All hypervisors
* supporting radix do expose that device tree node.
*/
radix_mem_block_size = radix_memory_block_size();
return; return;
} }
...@@ -855,7 +923,8 @@ int __meminit radix__create_section_mapping(unsigned long start, ...@@ -855,7 +923,8 @@ int __meminit radix__create_section_mapping(unsigned long start,
return -1; return -1;
} }
return create_physical_mapping(__pa(start), __pa(end), nid, prot); return create_physical_mapping(__pa(start), __pa(end),
radix_mem_block_size, nid, prot);
} }
int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
......
...@@ -399,6 +399,14 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) ...@@ -399,6 +399,14 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static unsigned long pnv_memory_block_size(void) static unsigned long pnv_memory_block_size(void)
{ {
/*
* We map the kernel linear region with 1GB large pages on radix. For
* memory hot unplug to work our memory block size must be at least
* this size.
*/
if (radix_enabled())
return radix_mem_block_size;
else
return 256UL * 1024 * 1024; return 256UL * 1024 * 1024;
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment