Commit 163a3d84 authored by Andrew Morton, committed by Jeff Garzik

[PATCH] hugetlb put_page speedup

Rework this function so that we only make the indirect call to the
page-freeing function on the final put_page(), rather than on every
invocation.
parent f004b8b3
...@@ -29,6 +29,8 @@ static long htlbzone_pages; ...@@ -29,6 +29,8 @@ static long htlbzone_pages;
static LIST_HEAD(htlbpage_freelist); static LIST_HEAD(htlbpage_freelist);
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED; static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void) static struct page *alloc_hugetlb_page(void)
{ {
int i; int i;
...@@ -45,7 +47,7 @@ static struct page *alloc_hugetlb_page(void) ...@@ -45,7 +47,7 @@ static struct page *alloc_hugetlb_page(void)
htlbpagemem--; htlbpagemem--;
spin_unlock(&htlbpage_lock); spin_unlock(&htlbpage_lock);
set_page_count(page, 1); set_page_count(page, 1);
page->lru.prev = (void *)huge_page_release; page->lru.prev = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
clear_highpage(&page[i]); clear_highpage(&page[i]);
return page; return page;
......
...@@ -26,6 +26,8 @@ static long htlbzone_pages; ...@@ -26,6 +26,8 @@ static long htlbzone_pages;
static LIST_HEAD(htlbpage_freelist); static LIST_HEAD(htlbpage_freelist);
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED; static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void) static struct page *alloc_hugetlb_page(void)
{ {
int i; int i;
...@@ -42,6 +44,7 @@ static struct page *alloc_hugetlb_page(void) ...@@ -42,6 +44,7 @@ static struct page *alloc_hugetlb_page(void)
htlbpagemem--; htlbpagemem--;
spin_unlock(&htlbpage_lock); spin_unlock(&htlbpage_lock);
set_page_count(page, 1); set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i) for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
clear_highpage(&page[i]); clear_highpage(&page[i]);
return page; return page;
......
...@@ -25,6 +25,7 @@ spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED; ...@@ -25,6 +25,7 @@ spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
extern long htlbpagemem; extern long htlbpagemem;
static void zap_hugetlb_resources(struct vm_area_struct *); static void zap_hugetlb_resources(struct vm_area_struct *);
void free_huge_page(struct page *page);
#define MAX_ID 32 #define MAX_ID 32
struct htlbpagekey { struct htlbpagekey {
...@@ -64,6 +65,7 @@ static struct page *alloc_hugetlb_page(void) ...@@ -64,6 +65,7 @@ static struct page *alloc_hugetlb_page(void)
spin_unlock(&htlbpage_lock); spin_unlock(&htlbpage_lock);
set_page_count(page, 1); set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
memset(page_address(page), 0, HPAGE_SIZE); memset(page_address(page), 0, HPAGE_SIZE);
return page; return page;
......
...@@ -232,11 +232,15 @@ static inline void get_page(struct page *page) ...@@ -232,11 +232,15 @@ static inline void get_page(struct page *page)
static inline void put_page(struct page *page) static inline void put_page(struct page *page)
{ {
if (PageCompound(page)) { if (PageCompound(page)) {
page = (struct page *)page->lru.next; if (put_page_testzero(page)) {
if (page->lru.prev) { /* destructor? */ page = (struct page *)page->lru.next;
(*(void (*)(struct page *))page->lru.prev)(page); if (page->lru.prev) { /* destructor? */
return; (*(void (*)(struct page *))page->lru.prev)(page);
} else {
__page_cache_release(page);
}
} }
return;
} }
if (!PageReserved(page) && put_page_testzero(page)) if (!PageReserved(page) && put_page_testzero(page))
__page_cache_release(page); __page_cache_release(page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment