Commit da456f14 authored by Mel Gorman, committed by Linus Torvalds

page allocator: do not disable interrupts in free_page_mlock()

free_page_mlock() tests and clears PG_mlocked using locked versions of the
bit operations.  If the bit was set, it disables interrupts to update the
counters, and this happens on every page free even though interrupts are
disabled again very shortly afterwards.  This is wasteful.

This patch splits what free_page_mlock() does.  The bit check is still
made.  However, the update of the counters is delayed until interrupts
are disabled, and the non-locked version of the bit clear is used.  One
potential weirdness with this split is that the counters do not get
updated if the bad_page() check is triggered, but a system showing bad
pages is in serious trouble already.
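
As a rough before/after sketch of the hot path described above (simplified
pseudocode, not the literal kernel source; the _before/_after suffixes are
added for the sketch, and the diff below shows the actual change):

/* Before: every free of an mlocked page paid for its own irq save/restore. */
static inline void free_page_mlock_before(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {	/* locked bit op */
		unsigned long flags;

		local_irq_save(flags);			/* extra irq disable */
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);	/* ...only to disable again soon */
	}
}

/* After: callers sample PageMlocked() up front and call this only from
 * within their existing irq-disabled region, so the non-locked bit clear
 * and a plain counter update are sufficient. */
static inline void free_page_mlock_after(struct page *page)
{
	__ClearPageMlocked(page);			/* non-locked bit op */
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}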
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ed0ae21d
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -157,14 +157,9 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  */
 static inline void free_page_mlock(struct page *page)
 {
-	if (unlikely(TestClearPageMlocked(page))) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__dec_zone_page_state(page, NR_MLOCK);
-		__count_vm_event(UNEVICTABLE_MLOCKFREED);
-		local_irq_restore(flags);
-	}
+	__ClearPageMlocked(page);
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
 
 #else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,7 +495,6 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
-	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
@@ -552,6 +551,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
+	int clearMlocked = PageMlocked(page);
 
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
@@ -567,6 +567,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
 					get_pageblock_migratetype(page));
@@ -1013,6 +1015,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int clearMlocked = PageMlocked(page);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -1028,7 +1031,10 @@ static void free_hot_cold_page(struct page *page, int cold)
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 
 	local_irq_save(flags);
+	if (unlikely(clearMlocked))
+		free_page_mlock(page);
 	__count_vm_event(PGFREE);
+
 	if (cold)
 		list_add_tail(&page->lru, &pcp->list);
 	else
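
For reference, the caller-side pattern above, condensed into a standalone
sketch (the function name __free_pages_ok_sketch and the elided steps are
illustrative only): the PG_mlocked state is sampled before free_pages_check()
can trip bad_page(), and the counter update is folded into the irq-disabled
region the free path already needs.

/* Sketch of the new caller pattern in __free_pages_ok() (illustrative). */
static void __free_pages_ok_sketch(struct page *page, unsigned int order)
{
	unsigned long flags;
	int clearMlocked = PageMlocked(page);	/* sample the bit up front */

	/* ... free_pages_check(), kernel_map_pages(), etc. ... */

	local_irq_save(flags);
	if (unlikely(clearMlocked))
		free_page_mlock(page);	/* irqs already off: no extra save/restore */
	__count_vm_events(PGFREE, 1 << order);
	/* ... free_one_page() ... */
	local_irq_restore(flags);
}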