Commit 479f854a authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: defer debugging checks of pages allocated from the PCP

Every allocated page is checked for a number of valid page fields.
This catches corruption bugs in pages that were already freed, but it
is expensive.  This patch weakens the debugging check by checking PCP
pages only when the PCP lists are being refilled.  All compound pages
are still checked.  This potentially avoids debugging checks entirely
if the PCP lists are never emptied and refilled, so some corruption
issues may be missed.  Full checking requires DEBUG_VM.
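
The split can be pictured with a small userspace sketch (illustrative
only: the standalone program and the names fake_page, check_on_refill
and check_on_alloc are invented for this example and are not the
kernel code, which is in the diff below):

  /*
   * Sketch: with DEBUG_VM defined, the validity check runs when a page
   * is handed out from the per-CPU cache; without it, the check runs
   * only when the cache is refilled, so the common allocation path
   * stays cheap.
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct fake_page { int flags; int refcount; };

  /* Stand-in for check_new_page(): a freed page must have clean fields. */
  static bool page_looks_bad(const struct fake_page *p)
  {
      return p->flags != 0 || p->refcount != 0;
  }

  #ifdef DEBUG_VM
  /* Full debugging: check each page as it is allocated from the cache. */
  static bool check_on_refill(const struct fake_page *p) { (void)p; return false; }
  static bool check_on_alloc(const struct fake_page *p) { return page_looks_bad(p); }
  #else
  /* Default: check only at refill time; allocation skips the check. */
  static bool check_on_refill(const struct fake_page *p) { return page_looks_bad(p); }
  static bool check_on_alloc(const struct fake_page *p) { (void)p; return false; }
  #endif

  int main(void)
  {
      struct fake_page pool[4] = { {0, 0}, {1, 0}, {0, 0}, {0, 0} };
      struct fake_page *cache[4];
      int cached = 0;

      /* Refill the per-CPU cache; bad pages are skipped here by default. */
      for (int i = 0; i < 4; i++) {
          if (check_on_refill(&pool[i]))
              continue;
          cache[cached++] = &pool[i];
      }

      /* Allocate from the cache; with DEBUG_VM the check moves here. */
      for (int i = 0; i < cached; i++) {
          if (check_on_alloc(cache[i]))
              continue;
          printf("allocated page with flags=%d\n", cache[i]->flags);
      }
      return 0;
  }

Built as-is, the corrupt page is rejected at refill time; built with
-DDEBUG_VM it is rejected at allocation time, mirroring the
check_pcp_refill()/check_new_pcp() split in the diff below.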

With the two deferred debugging patches applied, the impact on a page
allocator microbenchmark is:

                                             4.6.0-rc3                  4.6.0-rc3
                                           inline-v3r6            deferalloc-v3r7
  Min      alloc-odr0-1               344.00 (  0.00%)           317.00 (  7.85%)
  Min      alloc-odr0-2               248.00 (  0.00%)           231.00 (  6.85%)
  Min      alloc-odr0-4               209.00 (  0.00%)           192.00 (  8.13%)
  Min      alloc-odr0-8               181.00 (  0.00%)           166.00 (  8.29%)
  Min      alloc-odr0-16              168.00 (  0.00%)           154.00 (  8.33%)
  Min      alloc-odr0-32              161.00 (  0.00%)           148.00 (  8.07%)
  Min      alloc-odr0-64              158.00 (  0.00%)           145.00 (  8.23%)
  Min      alloc-odr0-128             156.00 (  0.00%)           143.00 (  8.33%)
  Min      alloc-odr0-256             168.00 (  0.00%)           154.00 (  8.33%)
  Min      alloc-odr0-512             178.00 (  0.00%)           167.00 (  6.18%)
  Min      alloc-odr0-1024            186.00 (  0.00%)           174.00 (  6.45%)
  Min      alloc-odr0-2048            192.00 (  0.00%)           180.00 (  6.25%)
  Min      alloc-odr0-4096            198.00 (  0.00%)           184.00 (  7.07%)
  Min      alloc-odr0-8192            200.00 (  0.00%)           188.00 (  6.00%)
  Min      alloc-odr0-16384           201.00 (  0.00%)           188.00 (  6.47%)
  Min      free-odr0-1                189.00 (  0.00%)           180.00 (  4.76%)
  Min      free-odr0-2                132.00 (  0.00%)           126.00 (  4.55%)
  Min      free-odr0-4                104.00 (  0.00%)            99.00 (  4.81%)
  Min      free-odr0-8                 90.00 (  0.00%)            85.00 (  5.56%)
  Min      free-odr0-16                84.00 (  0.00%)            80.00 (  4.76%)
  Min      free-odr0-32                80.00 (  0.00%)            76.00 (  5.00%)
  Min      free-odr0-64                78.00 (  0.00%)            74.00 (  5.13%)
  Min      free-odr0-128               77.00 (  0.00%)            73.00 (  5.19%)
  Min      free-odr0-256               94.00 (  0.00%)            91.00 (  3.19%)
  Min      free-odr0-512              108.00 (  0.00%)           112.00 ( -3.70%)
  Min      free-odr0-1024             115.00 (  0.00%)           118.00 ( -2.61%)
  Min      free-odr0-2048             120.00 (  0.00%)           125.00 ( -4.17%)
  Min      free-odr0-4096             123.00 (  0.00%)           129.00 ( -4.88%)
  Min      free-odr0-8192             126.00 (  0.00%)           130.00 ( -3.17%)
  Min      free-odr0-16384            126.00 (  0.00%)           131.00 ( -3.97%)

Note that the free paths for large numbers of pages are impacted, as
the debugging cost gets shifted into that path when the page data is
no longer necessarily cache-hot.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4db7548c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1714,7 +1714,41 @@ static inline bool free_pages_prezeroed(bool poisoned)
 		page_poisoning_enabled() && poisoned;
 }
 
-static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+#ifdef CONFIG_DEBUG_VM
+static bool check_pcp_refill(struct page *page)
+{
+	return false;
+}
+
+static bool check_new_pcp(struct page *page)
+{
+	return check_new_page(page);
+}
+#else
+static bool check_pcp_refill(struct page *page)
+{
+	return check_new_page(page);
+}
+static bool check_new_pcp(struct page *page)
+{
+	return false;
+}
+#endif /* CONFIG_DEBUG_VM */
+
+static bool check_new_pages(struct page *page, unsigned int order)
+{
+	int i;
+	for (i = 0; i < (1 << order); i++) {
+		struct page *p = page + i;
+
+		if (unlikely(check_new_page(p)))
+			return true;
+	}
+
+	return false;
+}
+
+static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 							unsigned int alloc_flags)
 {
 	int i;
@@ -1722,8 +1756,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	for (i = 0; i < (1 << order); i++) {
 		struct page *p = page + i;
 
-		if (unlikely(check_new_page(p)))
-			return 1;
 		if (poisoned)
 			poisoned &= page_is_poisoned(p);
 	}
@@ -1755,8 +1787,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 		set_page_pfmemalloc(page);
 	else
 		clear_page_pfmemalloc(page);
-
-	return 0;
 }
 
 /*
@@ -2178,6 +2208,9 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
+		if (unlikely(check_pcp_refill(page)))
+			continue;
+
 		/*
 		 * Split buddy pages returned by expand() are received here
 		 * in physical page order. The page is added to the callers and
@@ -2593,6 +2626,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		struct list_head *list;
 
 		local_irq_save(flags);
+		do {
 			pcp = &this_cpu_ptr(zone->pageset)->pcp;
 			list = &pcp->lists[migratetype];
 			if (list_empty(list)) {
@@ -2607,6 +2641,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 				page = list_last_entry(list, struct page, lru);
 			else
 				page = list_first_entry(list, struct page, lru);
+		} while (page && check_new_pcp(page));
 
 		__dec_zone_state(zone, NR_ALLOC_BATCH);
 		list_del(&page->lru);
@@ -2619,6 +2654,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 		spin_lock_irqsave(&zone->lock, flags);
 
+		do {
 			page = NULL;
 			if (alloc_flags & ALLOC_HARDER) {
 				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
@@ -2627,6 +2663,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			}
 			if (!page)
 				page = __rmqueue(zone, order, migratetype);
+		} while (page && check_new_pages(page, order));
 
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
@@ -2993,8 +3030,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 		page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
 				gfp_mask, alloc_flags, ac->migratetype);
 		if (page) {
-			if (prep_new_page(page, order, gfp_mask, alloc_flags))
-				goto try_this_zone;
+			prep_new_page(page, order, gfp_mask, alloc_flags);
 
 			/*
 			 * If this is a high-order atomic allocation then check