Commit 6423aa81 authored by Joonsoo Kim, committed by Linus Torvalds

mm/page_alloc.c: recalculate some of node threshold when on/offline memory

Some of the node thresholds depend on the number of managed pages in the
node. When memory goes online or offline, that number changes, and the
thresholds need to be adjusted accordingly.

Add the recalculation at the appropriate places and clean up the related
functions for better maintainability.

Link: http://lkml.kernel.org/r/1470724248-26780-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 81cbcbc2
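
For context before the diff: each NUMA node's reclaim thresholds
(min_unmapped_pages, min_slab_pages) are a fixed percentage of the node's
managed pages, summed over its zones. The following is a minimal userspace
sketch of that arithmetic, not kernel code; the two-zone layout, the zone
sizes, and the use of the default 1% vm.min_unmapped_ratio are made-up
values for illustration. It shows why the threshold goes stale if it is
not recalculated when memory is offlined:

#include <stdio.h>

/* Default vm.min_unmapped_ratio, in percent. */
#define MIN_UNMAPPED_RATIO 1

/*
 * Same arithmetic as setup_min_unmapped_ratio() in the diff below:
 * the node threshold is a percentage of each zone's managed pages.
 */
static unsigned long node_min_unmapped_pages(const unsigned long *zone_managed,
                                             int nr_zones)
{
        unsigned long threshold = 0;
        int i;

        for (i = 0; i < nr_zones; i++)
                threshold += (zone_managed[i] * MIN_UNMAPPED_RATIO) / 100;
        return threshold;
}

int main(void)
{
        /* Hypothetical node: two zones of 4 KiB pages, 4 GiB total. */
        unsigned long before[] = { 262144, 786432 };  /* 1 GiB + 3 GiB */
        /* The same node after 1 GiB of the second zone goes offline. */
        unsigned long after[]  = { 262144, 524288 };  /* 1 GiB + 2 GiB */

        /*
         * Prints 10485, then 7863: without recalculation the node
         * would keep the stale, larger threshold after the offline.
         */
        printf("before offline: %lu pages\n",
               node_min_unmapped_pages(before, 2));
        printf("after  offline: %lu pages\n",
               node_min_unmapped_pages(after, 2));
        return 0;
}

With this patch, that recalculation lives in setup_min_unmapped_ratio() and
setup_min_slab_ratio(), which run both from the sysctl handlers and from
init_per_zone_wmark_min(), so the thresholds track managed pages as memory
goes on/offline.
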
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4757,6 +4757,8 @@ int local_memory_node(int node)
 }
 #endif
 
+static void setup_min_unmapped_ratio(void);
+static void setup_min_slab_ratio(void);
 #else	/* CONFIG_NUMA */
 
 static void set_zonelist_order(void)
@@ -5878,9 +5880,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
 		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 #ifdef CONFIG_NUMA
 		zone->node = nid;
-		pgdat->min_unmapped_pages += (freesize*sysctl_min_unmapped_ratio)
-						/ 100;
-		pgdat->min_slab_pages += (freesize * sysctl_min_slab_ratio) / 100;
 #endif
 		zone->name = zone_names[j];
 		zone->zone_pgdat = pgdat;
@@ -6801,6 +6800,12 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_wmarks();
 	refresh_zone_stat_thresholds();
 	setup_per_zone_lowmem_reserve();
+
+#ifdef CONFIG_NUMA
+	setup_min_unmapped_ratio();
+	setup_min_slab_ratio();
+#endif
+
 	return 0;
 }
 core_initcall(init_per_zone_wmark_min)
@@ -6842,16 +6847,10 @@ int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+static void setup_min_unmapped_ratio(void)
 {
-	struct pglist_data *pgdat;
+	pg_data_t *pgdat;
 	struct zone *zone;
-	int rc;
-
-	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (rc)
-		return rc;
 
 	for_each_online_pgdat(pgdat)
 		pgdat->min_unmapped_pages = 0;
@@ -6859,26 +6858,47 @@ int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	for_each_zone(zone)
 		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
 				sysctl_min_unmapped_ratio) / 100;
-	return 0;
 }
 
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
-	struct pglist_data *pgdat;
-	struct zone *zone;
 	int rc;
 
 	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 	if (rc)
 		return rc;
 
+	setup_min_unmapped_ratio();
+
+	return 0;
+}
+
+static void setup_min_slab_ratio(void)
+{
+	pg_data_t *pgdat;
+	struct zone *zone;
+
 	for_each_online_pgdat(pgdat)
 		pgdat->min_slab_pages = 0;
 
 	for_each_zone(zone)
 		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
 				sysctl_min_slab_ratio) / 100;
+}
+
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+	void __user *buffer, size_t *length, loff_t *ppos)
+{
+	int rc;
+
+	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
+	if (rc)
+		return rc;
+
+	setup_min_slab_ratio();
+
+	return 0;
+}
 #endif