Commit bfeb022f authored by Logan Gunthorpe, committed by Linus Torvalds

mm/memory_hotplug: add pgprot_t to mhp_params

devm_memremap_pages() is currently used by the PCI P2PDMA code to create
struct page mappings for IO memory.  At present, these mappings are
created with PAGE_KERNEL which implies setting the PAT bits to be WB.
However, on x86, an mtrr register will typically override this and force
the cache type to be UC-.  In the case firmware doesn't set this
register it is effectively WB and will typically result in a machine
check exception when it's accessed.

Other arches are not currently likely to function correctly seeing they
don't have any MTRR registers to fall back on.

To solve this, provide a way to specify the pgprot value explicitly to
arch_add_memory().

Of the arches that support MEMORY_HOTPLUG: x86_64, and arm64 need a
simple change to pass the pgprot_t down to their respective functions
which set up the page tables.  For x86_32, set the page tables
explicitly using _set_memory_prot() (seeing they are already mapped).

For ia64, s390 and sh, reject anything but PAGE_KERNEL settings -- this
should be fine, for now, seeing these architectures don't support
ZONE_DEVICE.

A check in __add_pages() is also added to ensure the pgprot parameter
was set for all arches.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200306170846.9333-7-logang@deltatee.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4e00c5af
...@@ -1382,7 +1382,8 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -1382,7 +1382,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
size, PAGE_KERNEL, __pgd_pgtable_alloc, flags); size, params->pgprot, __pgd_pgtable_alloc,
flags);
memblock_clear_nomap(start, size); memblock_clear_nomap(start, size);
......
...@@ -676,6 +676,9 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -676,6 +676,9 @@ int arch_add_memory(int nid, u64 start, u64 size,
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
return -EINVAL;
ret = __add_pages(nid, start_pfn, nr_pages, params); ret = __add_pages(nid, start_pfn, nr_pages, params);
if (ret) if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n", printk("%s: Problem encountered in __add_pages() as ret=%d\n",
......
...@@ -132,7 +132,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, ...@@ -132,7 +132,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
resize_hpt_for_hotplug(memblock_phys_mem_size()); resize_hpt_for_hotplug(memblock_phys_mem_size());
start = (unsigned long)__va(start); start = (unsigned long)__va(start);
rc = create_section_mapping(start, start + size, nid, PAGE_KERNEL); rc = create_section_mapping(start, start + size, nid,
params->pgprot);
if (rc) { if (rc) {
pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n", pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
start, start + size, rc); start, start + size, rc);
......
...@@ -277,6 +277,9 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -277,6 +277,9 @@ int arch_add_memory(int nid, u64 start, u64 size,
if (WARN_ON_ONCE(params->altmap)) if (WARN_ON_ONCE(params->altmap))
return -EINVAL; return -EINVAL;
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
return -EINVAL;
rc = vmem_add_mapping(start, size); rc = vmem_add_mapping(start, size);
if (rc) if (rc)
return rc; return rc;
......
...@@ -412,6 +412,9 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -412,6 +412,9 @@ int arch_add_memory(int nid, u64 start, u64 size,
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
return -EINVAL;
/* We only have ZONE_NORMAL, so this is easy.. */ /* We only have ZONE_NORMAL, so this is easy.. */
ret = __add_pages(nid, start_pfn, nr_pages, params); ret = __add_pages(nid, start_pfn, nr_pages, params);
if (unlikely(ret)) if (unlikely(ret))
......
...@@ -824,6 +824,18 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -824,6 +824,18 @@ int arch_add_memory(int nid, u64 start, u64 size,
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
/*
* The page tables were already mapped at boot so if the caller
* requests a different mapping type then we must change all the
* pages with __set_memory_prot().
*/
if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) {
ret = __set_memory_prot(start, nr_pages, params->pgprot);
if (ret)
return ret;
}
return __add_pages(nid, start_pfn, nr_pages, params); return __add_pages(nid, start_pfn, nr_pages, params);
} }
......
...@@ -867,7 +867,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -867,7 +867,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
init_memory_mapping(start, start + size, PAGE_KERNEL); init_memory_mapping(start, start + size, params->pgprot);
return add_pages(nid, start_pfn, nr_pages, params); return add_pages(nid, start_pfn, nr_pages, params);
} }
......
...@@ -60,9 +60,12 @@ enum { ...@@ -60,9 +60,12 @@ enum {
/* /*
* Extended parameters for memory hotplug: * Extended parameters for memory hotplug:
* altmap: alternative allocator for memmap array (optional) * altmap: alternative allocator for memmap array (optional)
* pgprot: page protection flags to apply to newly created page tables
* (required)
*/ */
struct mhp_params { struct mhp_params {
struct vmem_altmap *altmap; struct vmem_altmap *altmap;
pgprot_t pgprot;
}; };
/* /*
......
...@@ -311,6 +311,9 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, ...@@ -311,6 +311,9 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
int err; int err;
struct vmem_altmap *altmap = params->altmap; struct vmem_altmap *altmap = params->altmap;
if (WARN_ON_ONCE(!params->pgprot.pgprot))
return -EINVAL;
err = check_hotplug_memory_addressable(pfn, nr_pages); err = check_hotplug_memory_addressable(pfn, nr_pages);
if (err) if (err)
return err; return err;
...@@ -1002,7 +1005,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) ...@@ -1002,7 +1005,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
*/ */
int __ref add_memory_resource(int nid, struct resource *res) int __ref add_memory_resource(int nid, struct resource *res)
{ {
struct mhp_params params = {}; struct mhp_params params = { .pgprot = PAGE_KERNEL };
u64 start, size; u64 start, size;
bool new_node = false; bool new_node = false;
int ret; int ret;
......
...@@ -189,8 +189,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -189,8 +189,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
* We do not want any optional features only our own memmap * We do not want any optional features only our own memmap
*/ */
.altmap = pgmap_altmap(pgmap), .altmap = pgmap_altmap(pgmap),
.pgprot = PAGE_KERNEL,
}; };
pgprot_t pgprot = PAGE_KERNEL;
int error, is_ram; int error, is_ram;
bool need_devmap_managed = true; bool need_devmap_managed = true;
...@@ -282,8 +282,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -282,8 +282,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
if (nid < 0) if (nid < 0)
nid = numa_mem_id(); nid = numa_mem_id();
error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0, error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
resource_size(res)); 0, resource_size(res));
if (error) if (error)
goto err_pfn_remap; goto err_pfn_remap;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment