Commit f5637d3b authored by Logan Gunthorpe, committed by Linus Torvalds

mm/memory_hotplug: rename mhp_restrictions to mhp_params

The mhp_restrictions struct really doesn't specify anything resembling a
restriction anymore so rename it to be mhp_params as it is a list of
extended parameters.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Badger <ebadger@gigaio.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/20200306170846.9333-3-logang@deltatee.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 96c6b598
...@@ -1374,7 +1374,7 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) ...@@ -1374,7 +1374,7 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
} }
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
int ret, flags = 0; int ret, flags = 0;
...@@ -1387,7 +1387,7 @@ int arch_add_memory(int nid, u64 start, u64 size, ...@@ -1387,7 +1387,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
memblock_clear_nomap(start, size); memblock_clear_nomap(start, size);
ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
restrictions); params);
if (ret) if (ret)
__remove_pgd_mapping(swapper_pg_dir, __remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size); __phys_to_virt(start), size);
......
...@@ -670,13 +670,13 @@ mem_init (void) ...@@ -670,13 +670,13 @@ mem_init (void)
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
ret = __add_pages(nid, start_pfn, nr_pages, restrictions); ret = __add_pages(nid, start_pfn, nr_pages, params);
if (ret) if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n", printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret); __func__, ret);
......
...@@ -122,7 +122,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, ...@@ -122,7 +122,7 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
} }
int __ref arch_add_memory(int nid, u64 start, u64 size, int __ref arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
...@@ -138,7 +138,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, ...@@ -138,7 +138,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
return -EFAULT; return -EFAULT;
} }
return __add_pages(nid, start_pfn, nr_pages, restrictions); return __add_pages(nid, start_pfn, nr_pages, params);
} }
void __ref arch_remove_memory(int nid, u64 start, u64 size, void __ref arch_remove_memory(int nid, u64 start, u64 size,
......
...@@ -268,20 +268,20 @@ device_initcall(s390_cma_mem_init); ...@@ -268,20 +268,20 @@ device_initcall(s390_cma_mem_init);
#endif /* CONFIG_CMA */ #endif /* CONFIG_CMA */
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size); unsigned long size_pages = PFN_DOWN(size);
int rc; int rc;
if (WARN_ON_ONCE(restrictions->altmap)) if (WARN_ON_ONCE(params->altmap))
return -EINVAL; return -EINVAL;
rc = vmem_add_mapping(start, size); rc = vmem_add_mapping(start, size);
if (rc) if (rc)
return rc; return rc;
rc = __add_pages(nid, start_pfn, size_pages, restrictions); rc = __add_pages(nid, start_pfn, size_pages, params);
if (rc) if (rc)
vmem_remove_mapping(start, size); vmem_remove_mapping(start, size);
return rc; return rc;
......
...@@ -406,14 +406,14 @@ void __init mem_init(void) ...@@ -406,14 +406,14 @@ void __init mem_init(void)
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = PFN_DOWN(start); unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
int ret; int ret;
/* We only have ZONE_NORMAL, so this is easy.. */ /* We only have ZONE_NORMAL, so this is easy.. */
ret = __add_pages(nid, start_pfn, nr_pages, restrictions); ret = __add_pages(nid, start_pfn, nr_pages, params);
if (unlikely(ret)) if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret); printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
......
...@@ -819,12 +819,12 @@ void __init mem_init(void) ...@@ -819,12 +819,12 @@ void __init mem_init(void)
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
return __add_pages(nid, start_pfn, nr_pages, restrictions); return __add_pages(nid, start_pfn, nr_pages, params);
} }
void arch_remove_memory(int nid, u64 start, u64 size, void arch_remove_memory(int nid, u64 start, u64 size,
......
...@@ -843,11 +843,11 @@ static void update_end_of_memory_vars(u64 start, u64 size) ...@@ -843,11 +843,11 @@ static void update_end_of_memory_vars(u64 start, u64 size)
} }
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
int ret; int ret;
ret = __add_pages(nid, start_pfn, nr_pages, restrictions); ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret); WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */ /* update max_pfn, max_low_pfn and high_memory */
...@@ -858,14 +858,14 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, ...@@ -858,14 +858,14 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
} }
int arch_add_memory(int nid, u64 start, u64 size, int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT;
init_memory_mapping(start, start + size); init_memory_mapping(start, start + size);
return add_pages(nid, start_pfn, nr_pages, restrictions); return add_pages(nid, start_pfn, nr_pages, params);
} }
#define PAGE_INUSE 0xFD #define PAGE_INUSE 0xFD
......
...@@ -58,10 +58,10 @@ enum { ...@@ -58,10 +58,10 @@ enum {
}; };
/* /*
* Restrictions for the memory hotplug: * Extended parameters for memory hotplug:
* altmap: alternative allocator for memmap array * altmap: alternative allocator for memmap array (optional)
*/ */
struct mhp_restrictions { struct mhp_params {
struct vmem_altmap *altmap; struct vmem_altmap *altmap;
}; };
...@@ -112,7 +112,7 @@ extern int restore_online_page_callback(online_page_callback_t callback); ...@@ -112,7 +112,7 @@ extern int restore_online_page_callback(online_page_callback_t callback);
extern int try_online_node(int nid); extern int try_online_node(int nid);
extern int arch_add_memory(int nid, u64 start, u64 size, extern int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions); struct mhp_params *params);
extern u64 max_mem_size; extern u64 max_mem_size;
extern int memhp_online_type_from_str(const char *str); extern int memhp_online_type_from_str(const char *str);
...@@ -133,17 +133,17 @@ extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, ...@@ -133,17 +133,17 @@ extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
/* reasonably generic interface to expand the physical pages */ /* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_restrictions *restrictions); struct mhp_params *params);
#ifndef CONFIG_ARCH_HAS_ADD_PAGES #ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn, static inline int add_pages(int nid, unsigned long start_pfn,
unsigned long nr_pages, struct mhp_restrictions *restrictions) unsigned long nr_pages, struct mhp_params *params)
{ {
return __add_pages(nid, start_pfn, nr_pages, restrictions); return __add_pages(nid, start_pfn, nr_pages, params);
} }
#else /* ARCH_HAS_ADD_PAGES */ #else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_restrictions *restrictions); struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */ #endif /* ARCH_HAS_ADD_PAGES */
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
......
...@@ -304,12 +304,12 @@ static int check_hotplug_memory_addressable(unsigned long pfn, ...@@ -304,12 +304,12 @@ static int check_hotplug_memory_addressable(unsigned long pfn,
* add the new pages. * add the new pages.
*/ */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
struct mhp_restrictions *restrictions) struct mhp_params *params)
{ {
const unsigned long end_pfn = pfn + nr_pages; const unsigned long end_pfn = pfn + nr_pages;
unsigned long cur_nr_pages; unsigned long cur_nr_pages;
int err; int err;
struct vmem_altmap *altmap = restrictions->altmap; struct vmem_altmap *altmap = params->altmap;
err = check_hotplug_memory_addressable(pfn, nr_pages); err = check_hotplug_memory_addressable(pfn, nr_pages);
if (err) if (err)
...@@ -1002,7 +1002,7 @@ static int online_memory_block(struct memory_block *mem, void *arg) ...@@ -1002,7 +1002,7 @@ static int online_memory_block(struct memory_block *mem, void *arg)
*/ */
int __ref add_memory_resource(int nid, struct resource *res) int __ref add_memory_resource(int nid, struct resource *res)
{ {
struct mhp_restrictions restrictions = {}; struct mhp_params params = {};
u64 start, size; u64 start, size;
bool new_node = false; bool new_node = false;
int ret; int ret;
...@@ -1030,7 +1030,7 @@ int __ref add_memory_resource(int nid, struct resource *res) ...@@ -1030,7 +1030,7 @@ int __ref add_memory_resource(int nid, struct resource *res)
new_node = ret; new_node = ret;
/* call arch's memory hotadd */ /* call arch's memory hotadd */
ret = arch_add_memory(nid, start, size, &restrictions); ret = arch_add_memory(nid, start, size, &params);
if (ret < 0) if (ret < 0)
goto error; goto error;
......
...@@ -184,7 +184,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -184,7 +184,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{ {
struct resource *res = &pgmap->res; struct resource *res = &pgmap->res;
struct dev_pagemap *conflict_pgmap; struct dev_pagemap *conflict_pgmap;
struct mhp_restrictions restrictions = { struct mhp_params params = {
/* /*
* We do not want any optional features only our own memmap * We do not want any optional features only our own memmap
*/ */
...@@ -302,7 +302,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -302,7 +302,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
*/ */
if (pgmap->type == MEMORY_DEVICE_PRIVATE) { if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
error = add_pages(nid, PHYS_PFN(res->start), error = add_pages(nid, PHYS_PFN(res->start),
PHYS_PFN(resource_size(res)), &restrictions); PHYS_PFN(resource_size(res)), &params);
} else { } else {
error = kasan_add_zero_shadow(__va(res->start), resource_size(res)); error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
if (error) { if (error) {
...@@ -311,7 +311,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -311,7 +311,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
} }
error = arch_add_memory(nid, res->start, resource_size(res), error = arch_add_memory(nid, res->start, resource_size(res),
&restrictions); &params);
} }
if (!error) { if (!error) {
...@@ -319,7 +319,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) ...@@ -319,7 +319,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
move_pfn_range_to_zone(zone, PHYS_PFN(res->start), move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
PHYS_PFN(resource_size(res)), restrictions.altmap); PHYS_PFN(resource_size(res)), params.altmap);
} }
mem_hotplug_done(); mem_hotplug_done();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment