Commit acbc15a4 authored by Joonsoo Kim, committed by Linus Torvalds

mm/debug_pagealloc.c: clean-up guard page handling code

Patch series "Reduce memory waste by page extension user".

This patchset tries to reduce the memory waste caused by page extension
users.

The first case is architecture-supported debug_pagealloc.  It doesn't
require additional memory if the guard page feature isn't used, saving
8 bytes per page in this case.
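
For scale (an illustrative calculation, not a figure from this
changelog): with 4KB pages, a 16GB machine has about four million
struct pages, so 8 bytes per page comes to roughly 32MB.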

The second case is related to the page owner feature.  Until now, if
page_ext users wanted their own fields in page_ext, those fields had to
be hard-coded into struct page_ext.  That has the following problem.

  struct page_ext {
  #ifdef CONFIG_A
  	int a;
  #endif
  #ifdef CONFIG_B
  	int b;
  #endif
  };

Assume that the kernel is built with both CONFIG_A and CONFIG_B.  Even
if we enable feature A but not feature B at runtime, each page_ext
entry takes space for two ints rather than one.  That is undesirable
waste, so this patchset tries to reduce it.  With this patchset, we can
save the 20 bytes per page currently dedicated to the page owner
feature in some configurations.
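
The rest of the series addresses this by letting each page_ext user
declare its per-page size plus a runtime "is this needed?" callback, so
space is reserved only for features actually enabled.  A minimal sketch
of that direction, assuming an ops structure along the lines of the
eventual page_ext API (the feature_a names are purely illustrative):

  #include <linux/page_ext.h>	/* assumes the extended ops struct */

  /* Sketch only: feature_a is a made-up page_ext user. */
  struct feature_a_data { int a; };

  static bool feature_a_enabled;	/* e.g. set by a boot parameter */

  static bool need_feature_a(void)
  {
  	return feature_a_enabled;
  }

  struct page_ext_operations feature_a_ops = {
  	.size = sizeof(struct feature_a_data),	/* bytes per page if enabled */
  	.need = need_feature_a,			/* queried once during boot */
  };

  /*
   * At boot, page_ext would sum .size over all users whose .need()
   * returns true and record each user's offset, so a disabled feature
   * costs zero bytes per page instead of a hard-coded field.
   */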

This patch (of 6):

We can make the code cleaner by moving the decision conditions for
set_page_guard() into set_page_guard() itself; this helps readability.
There is no functional change.

Link: http://lkml.kernel.org/r/1471315879-32294-2-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bf484383
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -637,17 +637,20 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 
-static inline void set_page_guard(struct zone *zone, struct page *page,
+static inline bool set_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype)
 {
 	struct page_ext *page_ext;
 
 	if (!debug_guardpage_enabled())
-		return;
+		return false;
+
+	if (order >= debug_guardpage_minorder())
+		return false;
 
 	page_ext = lookup_page_ext(page);
 	if (unlikely(!page_ext))
-		return;
+		return false;
 
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
@@ -655,6 +658,8 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 	set_page_private(page, order);
 	/* Guard pages are not available for any usage */
 	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
+
+	return true;
 }
 
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -677,8 +682,8 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 }
 #else
 struct page_ext_operations debug_guardpage_ops = { NULL, };
-static inline void set_page_guard(struct zone *zone, struct page *page,
-			unsigned int order, int migratetype) {}
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+			unsigned int order, int migratetype) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype) {}
 #endif
@@ -1622,18 +1627,15 @@ static inline void expand(struct zone *zone, struct page *page,
 		size >>= 1;
 		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
 
-		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
-			debug_guardpage_enabled() &&
-			high < debug_guardpage_minorder()) {
-			/*
-			 * Mark as guard pages (or page), that will allow to
-			 * merge back to allocator when buddy will be freed.
-			 * Corresponding page table entries will not be touched,
-			 * pages will stay not present in virtual address space
-			 */
-			set_page_guard(zone, &page[size], high, migratetype);
+		/*
+		 * Mark as guard pages (or page), that will allow to
+		 * merge back to allocator when buddy will be freed.
+		 * Corresponding page table entries will not be touched,
+		 * pages will stay not present in virtual address space
+		 */
+		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
-		}
+
 		list_add(&page[size].lru, &area->free_list[migratetype]);
 		area->nr_free++;
 		set_page_order(&page[size], high);