Commit 108bcc96 authored by Cody P Schafer, committed by Linus Torvalds

mm: add & use zone_end_pfn() and zone_spans_pfn()

Add 2 helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code
duplication.

This also switches to using them in compaction (where an additional
variable needed to be renamed), page_alloc, vmstat, memory_hotplug, and
kmemleak.

Note that in compaction.c I avoid calling zone_end_pfn() repeatedly
because I expect at some point the synchronization issues with start_pfn
& spanned_pages will need fixing, either by actually using the seqlock
or clever memory barrier usage.
Signed-off-by: Cody P Schafer <cody@linux.vnet.ibm.com>
Cc: David Hansen <dave@linux.vnet.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9127ab4f
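
The seqlock the note refers to is the zone's span seqlock. As a minimal
sketch (the function name zone_pfn_in_span_stable() is hypothetical, not
part of this commit), a reader that needs a consistent view of
zone_start_pfn and spanned_pages would wrap zone_spans_pfn() in the
existing zone_span_seqbegin()/zone_span_seqretry() pair, exactly as
page_outside_zone_boundaries() does in the page_alloc.c hunk below:

/*
 * Sketch only: retry the span check until no hotplug resize raced with
 * us, so we never act on a torn zone_start_pfn/spanned_pages pair.
 * zone_pfn_in_span_stable() is a hypothetical name.
 */
static bool zone_pfn_in_span_stable(struct zone *zone, unsigned long pfn)
{
	unsigned seq;
	bool in_span;

	do {
		seq = zone_span_seqbegin(zone);
		in_span = zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	return in_span;
}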
include/linux/mmzone.h
@@ -527,6 +527,16 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+static inline unsigned zone_end_pfn(const struct zone *zone)
+{
+	return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
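
For illustration, the hypothetical helper below (not from this commit)
shows the intended use of both new helpers: bounds-check a pfn against
the zone before dereferencing its struct page.

#include <linux/mm.h>

/*
 * Sketch only: return the page for @pfn, or NULL if it lies outside
 * @zone's span or has no backing memmap.
 */
static struct page *zone_page_or_null(struct zone *zone, unsigned long pfn)
{
	if (!zone_spans_pfn(zone, pfn) || !pfn_valid(pfn))
		return NULL;
	return pfn_to_page(pfn);
}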
mm/compaction.c
@@ -86,7 +86,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
 	zone->compact_cached_migrate_pfn = start_pfn;
@@ -647,7 +647,7 @@ static void isolate_freepages(struct zone *zone,
 				struct compact_control *cc)
 {
 	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -666,7 +666,7 @@ static void isolate_freepages(struct zone *zone,
 	 */
 	high_pfn = min(low_pfn, pfn);
 
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	z_end_pfn = zone_end_pfn(zone);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
@@ -709,7 +709,7 @@ static void isolate_freepages(struct zone *zone,
 		 * only scans within a pageblock
 		 */
 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-		end_pfn = min(end_pfn, zone_end_pfn);
+		end_pfn = min(end_pfn, z_end_pfn);
 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
 						   freelist, false);
 		nr_freepages += isolated;
@@ -923,7 +923,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 
 	ret = compaction_suitable(zone, cc->order);
 	switch (ret) {
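
The z_end_pfn rename above preserves the point of the commit note:
isolate_freepages() takes one snapshot of the zone end and clamps every
pageblock against it, rather than re-reading zone state each iteration.
A stripped-down sketch of that pattern (count_pageblock_pages() is a
hypothetical name, not kernel code):

/*
 * Sketch only: snapshot the zone end once so the loop bound cannot
 * shift if memory hotplug resizes the zone mid-scan; a slightly stale
 * bound is acceptable, a torn one is not.
 */
static unsigned long count_pageblock_pages(struct zone *zone)
{
	unsigned long pfn, nr = 0;
	unsigned long z_end_pfn = zone_end_pfn(zone);	/* single read */

	for (pfn = zone->zone_start_pfn; pfn < z_end_pfn;
	     pfn += pageblock_nr_pages) {
		unsigned long end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

		end_pfn = min(end_pfn, z_end_pfn);	/* stay inside the zone */
		nr += end_pfn - pfn;
	}

	return nr;
}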
mm/kmemleak.c
@@ -1300,9 +1300,8 @@ static void kmemleak_scan(void)
 	 */
 	lock_memory_hotplug();
 	for_each_online_node(i) {
-		pg_data_t *pgdat = NODE_DATA(i);
-		unsigned long start_pfn = pgdat->node_start_pfn;
-		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+		unsigned long start_pfn = node_start_pfn(i);
+		unsigned long end_pfn = node_end_pfn(i);
 		unsigned long pfn;
 
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
mm/memory_hotplug.c
@@ -299,7 +299,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
 	/* can't move pfns which are higher than @z2 */
-	if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
+	if (end_pfn > zone_end_pfn(z2))
 		goto out_fail;
 	/* the move out part mast at the left most of @z2 */
 	if (start_pfn > z2->zone_start_pfn)
@@ -315,7 +315,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 		z1_start_pfn = start_pfn;
 
 	resize_zone(z1, z1_start_pfn, end_pfn);
-	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
+	resize_zone(z2, end_pfn, zone_end_pfn(z2));
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);
 
@@ -347,15 +347,15 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
 	if (z1->zone_start_pfn > start_pfn)
 		goto out_fail;
 	/* the move out part mast at the right most of @z1 */
-	if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
+	if (zone_end_pfn(z1) > end_pfn)
 		goto out_fail;
 	/* must included/overlap */
-	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
+	if (start_pfn >= zone_end_pfn(z1))
 		goto out_fail;
 
 	/* use end_pfn for z2's end_pfn if z2 is empty */
 	if (z2->spanned_pages)
-		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+		z2_end_pfn = zone_end_pfn(z2);
 	else
 		z2_end_pfn = end_pfn;
 
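
For context, resize_zone() (added by the earlier hotplug rework, not by
this commit) is the write side that zone_spans_pfn() readers race with.
Assuming it publishes the new span under the zone's span seqlock write
lock, it amounts to roughly the following sketch (resize_zone_sketch()
is a hypothetical stand-in, not the verbatim kernel function):

/*
 * Sketch only: publish a new span inside the seqlock write section so
 * readers using zone_span_seqbegin()/zone_span_seqretry() retry instead
 * of observing a half-updated zone_start_pfn/spanned_pages pair.
 */
static void resize_zone_sketch(struct zone *zone, unsigned long start_pfn,
			       unsigned long end_pfn)
{
	zone_span_writelock(zone);
	zone->zone_start_pfn = start_pfn;
	zone->spanned_pages = end_pfn - start_pfn;
	zone_span_writeunlock(zone);
}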
mm/page_alloc.c
@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 	do {
 		seq = zone_span_seqbegin(zone);
-		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-			ret = 1;
-		else if (pfn < zone->zone_start_pfn)
+		if (!zone_spans_pfn(zone, pfn))
 			ret = 1;
 	} while (zone_span_seqretry(zone, seq));
 
@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone_spans_pfn(zone, start_pfn))
 		start_page = page;
-	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
 	return move_freepages(zone, start_page, end_page, migratetype);
@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
 
 	spin_lock_irqsave(&zone->lock, flags);
 
-	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	max_zone_pfn = zone_end_pfn(zone);
 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * the block.
 	 */
 	start_pfn = zone->zone_start_pfn;
-	end_pfn = start_pfn + zone->spanned_pages;
+	end_pfn = zone_end_pfn(zone);
 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;
@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * pfn out of zone.
 		 */
 		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && (pfn < zone_end_pfn(z))
 		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		 * for the buddy allocator to function correctly.
 		 */
 		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-		end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+		end = pgdat_end_pfn(pgdat);
 		end = ALIGN(end, MAX_ORDER_NR_PAGES);
 		size = (end - start) * sizeof(struct page);
 		map = alloc_remap(pgdat->node_id, size);
@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(pfn < zone->zone_start_pfn);
-	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)
@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 	zone = page_zone(page);
 	pfn = page_to_pfn(page);
-	if (zone->zone_start_pfn > pfn ||
-			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
 	return !has_unmovable_pages(zone, page, 0, true);
 }
mm/vmstat.c
@@ -891,7 +891,7 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 	int mtype;
 	unsigned long pfn;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long count[MIGRATE_TYPES] = { 0, };
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {