Commit 5f7a75ac authored by Mel Gorman, committed by Linus Torvalds

mm: page_alloc: do not cache reclaim distances

pgdat->reclaim_nodes tracks if a remote node is allowed to be reclaimed
by zone_reclaim due to its distance.  As it is expected that
zone_reclaim_mode will be rarely enabled, it is unreasonable for all
machines to take a penalty.  Fortunately, the zone_reclaim_mode() path
is already slow and it is the path that takes the hit.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4f9b16a6
@@ -763,7 +763,6 @@ typedef struct pglist_data {
 	unsigned long node_spanned_pages; /* total size of physical page
 					     range, including holes */
 	int node_id;
-	nodemask_t reclaim_nodes;	/* Nodes allowed to reclaim from */
 	wait_queue_head_t kswapd_wait;
 	wait_queue_head_t pfmemalloc_wait;
 	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
......
@@ -1850,16 +1850,8 @@ static bool zone_local(struct zone *local_zone, struct zone *zone)
 
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
-	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
-}
-
-static void __paginginit init_zone_allows_reclaim(int nid)
-{
-	int i;
-
-	for_each_node_state(i, N_MEMORY)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
-			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
+				RECLAIM_DISTANCE;
 }
 
 #else	/* CONFIG_NUMA */
@@ -1893,9 +1885,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 	return true;
 }
 
-static inline void init_zone_allows_reclaim(int nid)
-{
-}
-
 #endif	/* CONFIG_NUMA */
/* /*
@@ -4933,8 +4922,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
-	if (node_state(nid, N_MEMORY))
-		init_zone_allows_reclaim(nid);
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment