Commit 21823259 authored by Paul Mundt

sh: Ensure active regions have a backing PMB entry.

In the NUMA or memory hot-add case where system memory has been
partitioned up, we immediately run into a situation where the existing
PMB entry doesn't cover the new range (primarily as a result of the entry
size being shrunk to match the node size early in initialization). To fix
this up, it's necessary to preload a PMB mapping for the new range prior
to activation, in order to circumvent reset by MMU.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent dfbca899
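
For illustration, a minimal standalone sketch of what the change below boils down to: derive the physical range from the pfn range and bolt a PMB entry at its direct-mapped virtual address before the range is activated. The wrapper name preload_pmb_for_range() is hypothetical; pmb_bolt_mapping(), __va() and PAGE_KERNEL are the calls actually used in the diff, and the sketch assumes it sits in arch/sh kernel code where those symbols are visible.

/* Hypothetical wrapper; the real change lives in __add_active_range(). */
static void __init preload_pmb_for_range(unsigned long start_pfn,
					 unsigned long end_pfn)
{
	unsigned long start = start_pfn << PAGE_SHIFT;	/* physical base */
	unsigned long end   = end_pfn << PAGE_SHIFT;	/* physical end  */

	/*
	 * __va() gives the kernel's direct-mapped virtual address for the
	 * physical base; PAGE_KERNEL requests normal cached kernel RW
	 * protections. Bolting a PMB entry here keeps the range mapped
	 * across reset by MMU, so activating it later is safe.
	 */
	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
			 PAGE_KERNEL);
}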
@@ -191,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 						unsigned long end_pfn)
 {
 	struct resource *res = &mem_resources[nid];
+	unsigned long start, end;
 
 	WARN_ON(res->name); /* max one active range per node for now */
 
+	start = start_pfn << PAGE_SHIFT;
+	end = end_pfn << PAGE_SHIFT;
+
 	res->name = "System RAM";
-	res->start = start_pfn << PAGE_SHIFT;
-	res->end = (end_pfn << PAGE_SHIFT) - 1;
+	res->start = start;
+	res->end = end - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (request_resource(&iomem_resource, res)) {
 		pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
 		       start_pfn, end_pfn);
@@ -213,6 +218,14 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
 	request_resource(res, &data_resource);
 	request_resource(res, &bss_resource);
 
+	/*
+	 * Also make sure that there is a PMB mapping that covers this
+	 * range before we attempt to activate it, to avoid reset by MMU.
+	 * We can hit this path with NUMA or memory hot-add.
+	 */
+	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
+			 PAGE_KERNEL);
+
 	add_active_range(nid, start_pfn, end_pfn);
 }
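
For context, a hedged sketch of a caller on the NUMA or memory hot-add path. The node-setup function name below is hypothetical and not part of this commit; only __add_active_range() and its signature come from the diff above.

/* Hypothetical node-setup path; names are illustrative only. */
static void __init sketch_setup_node(unsigned int nid,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	/*
	 * Claims the range as "System RAM", bolts a PMB mapping that
	 * covers it, and records it as an active range for the node.
	 */
	__add_active_range(nid, start_pfn, end_pfn);
}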