Commit 86051ca5 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

mm: fix usemap initialization

The usemap must be initialized only when the pfn is within the zone.  If it
is not, the write lands outside the zone's pageblock bitmap and corrupts
memory.
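
As a minimal sketch of that guard (the helper pfn_within_zone() below is
hypothetical and not part of the patch), the fix amounts to checking the pfn
against the zone that owns the bitmap before touching it, since the memmap may
extend past the zone's valid pfn range for alignment reasons while the usemap
only covers [zone_start_pfn, zone_start_pfn + spanned_pages):

/* Hypothetical helper, for illustration only: mirrors the range check the
 * patch adds before calling set_pageblock_migratetype(). */
static inline int pfn_within_zone(unsigned long pfn,
				  unsigned long zone_start_pfn,
				  unsigned long spanned_pages)
{
	return zone_start_pfn <= pfn &&
	       pfn < zone_start_pfn + spanned_pages;
}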

This patch also reduces the number of calls to set_pageblock_migratetype() by
changing the condition from

	(pfn & (pageblock_nr_pages - 1))

to

	!(pfn & (pageblock_nr_pages - 1))

so that it is called once per pageblock, at the block's first pfn, instead of
for every pfn that is not pageblock-aligned.
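
To make the reduction concrete, the following small user-space sketch (not
kernel code; the block size of 1024 pages is an assumed example value,
corresponding to pageblock_order == 10) counts how often each predicate fires
across four pageblocks:

#include <stdio.h>

/* Example value only: in the kernel, pageblock_nr_pages is
 * 1UL << pageblock_order and depends on the configuration. */
#define PAGEBLOCK_NR_PAGES 1024UL

int main(void)
{
	unsigned long pfn, old_calls = 0, new_calls = 0;
	unsigned long end_pfn = 4 * PAGEBLOCK_NR_PAGES;

	for (pfn = 0; pfn < end_pfn; pfn++) {
		/* old predicate: true for every pfn that is NOT block-aligned */
		if (pfn & (PAGEBLOCK_NR_PAGES - 1))
			old_calls++;
		/* fixed predicate: true only at the first pfn of each block */
		if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
			new_calls++;
	}

	/* Prints "old: 4092, new: 4" for four pageblocks of 1024 pages. */
	printf("old: %lu, new: %lu\n", old_calls, new_calls);
	return 0;
}

With the old predicate the function would be called 1023 times per 1024-page
block; with the fix it is called exactly once, at the block boundary.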
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Shi Weihua <shiwh@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a01e035e
@@ -2524,7 +2524,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
+	struct zone *z;
 
+	z = &NODE_DATA(nid)->node_zones[zone];
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 		/*
 		 * There can be holes in boot-time mem_map[]s
@@ -2542,7 +2544,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		init_page_count(page);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
-
 		/*
 		 * Mark the block movable so that blocks are reserved for
 		 * movable at startup. This will force kernel allocations
@@ -2551,8 +2552,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * kernel allocations are made. Later some blocks near
 		 * the start are marked MIGRATE_RESERVE by
 		 * setup_zone_migrate_reserve()
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
 		 */
-		if ((pfn & (pageblock_nr_pages-1)))
+		if ((z->zone_start_pfn <= pfn)
+		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
 		INIT_LIST_HEAD(&page->lru);
@@ -4464,6 +4472,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
+	VM_BUG_ON(pfn < zone->zone_start_pfn);
+	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)