Commit 163a3d84 authored by Andrew Morton, committed by Jeff Garzik

[PATCH] hugetlb put_page speedup

Rework this function so that we only make the indirect call to the
page-freeing function on the final put_page(), rather than on every
invocation.
parent f004b8b3
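
For context, the mechanism the patch relies on: the head of a compound (huge) page carries an optional destructor pointer in page->lru.prev, and after this change the indirect call through that pointer is made only when the reference count actually drops to zero. Below is a minimal userspace sketch of that pattern, not kernel code; every name in it (model_page, get_page_model, put_page_model, free_huge_page_model) is invented for illustration.

/*
 * Userspace model of the reworked put_page(): a refcounted object keeps
 * an optional destructor pointer, and that pointer is dereferenced only
 * when the final reference is dropped, not on every put.
 */
#include <stdio.h>
#include <stdlib.h>

struct model_page {
	int count;				/* reference count */
	void (*dtor)(struct model_page *);	/* plays the role of page->lru.prev */
};

/* Stands in for free_huge_page(): runs exactly once, on the last put. */
static void free_huge_page_model(struct model_page *page)
{
	printf("destructor called, freeing page %p\n", (void *)page);
	free(page);
}

static struct model_page *alloc_page_model(void)
{
	struct model_page *page = malloc(sizeof(*page));

	if (!page)
		return NULL;
	page->count = 1;
	page->dtor = free_huge_page_model;	/* like page->lru.prev = free_huge_page */
	return page;
}

static void get_page_model(struct model_page *page)
{
	page->count++;
}

/* Mirrors the reworked put_page(): test the count first, call the destructor last. */
static void put_page_model(struct model_page *page)
{
	if (--page->count == 0) {
		if (page->dtor)
			page->dtor(page);	/* indirect call, final put only */
		else
			free(page);		/* fallback, like __page_cache_release() */
	}
}

int main(void)
{
	struct model_page *page = alloc_page_model();

	get_page_model(page);
	get_page_model(page);
	put_page_model(page);	/* plain decrement, no indirect call */
	put_page_model(page);	/* plain decrement, no indirect call */
	put_page_model(page);	/* count hits zero: destructor runs here */
	return 0;
}

Built with any C compiler, the sketch prints the destructor message exactly once, on the final put, which is the behaviour the reworked put_page() below gives hugepages.
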
@@ -29,6 +29,8 @@ static long htlbzone_pages;
 static LIST_HEAD(htlbpage_freelist);
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+void free_huge_page(struct page *page);
+
 static struct page *alloc_hugetlb_page(void)
 {
 	int i;
@@ -45,7 +47,7 @@ static struct page *alloc_hugetlb_page(void)
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
-	page->lru.prev = (void *)huge_page_release;
+	page->lru.prev = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
@@ -26,6 +26,8 @@ static long htlbzone_pages;
 static LIST_HEAD(htlbpage_freelist);
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 
+void free_huge_page(struct page *page);
+
 static struct page *alloc_hugetlb_page(void)
 {
 	int i;
@@ -42,6 +44,7 @@ static struct page *alloc_hugetlb_page(void)
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
@@ -25,6 +25,7 @@ spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
 extern long htlbpagemem;
 
 static void zap_hugetlb_resources(struct vm_area_struct *);
+void free_huge_page(struct page *page);
 
 #define MAX_ID	32
 struct htlbpagekey {
@@ -64,6 +65,7 @@ static struct page *alloc_hugetlb_page(void)
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
+	page->lru.prev = (void *)free_huge_page;
 	memset(page_address(page), 0, HPAGE_SIZE);
 	return page;
@@ -232,12 +232,16 @@ static inline void get_page(struct page *page)
 static inline void put_page(struct page *page)
 {
 	if (PageCompound(page)) {
+		if (put_page_testzero(page)) {
 			page = (struct page *)page->lru.next;
 			if (page->lru.prev) {	/* destructor? */
 				(*(void (*)(struct page *))page->lru.prev)(page);
-				return;
+			} else {
+				__page_cache_release(page);
+			}
 		}
+		return;
 	}
 	if (!PageReserved(page) && put_page_testzero(page))
 		__page_cache_release(page);
 }
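
Taken together, the hunks make every non-final put_page() on a hugepage a plain put_page_testzero() decrement. The indirect call through page->lru.prev, which the allocators above now set to free_huge_page (replacing huge_page_release in the first file and newly installed in the other two), happens once, when the last reference is dropped; a compound page with no destructor set falls back to __page_cache_release().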