Commit 8fd3d458 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] batched freeing of anon pages

A reworked version of the batched page freeing and lock amortisation
for VMA teardown.

It walks the existing 507-page list in the mmu_gather_t in 16-page
chunks, drops their refcounts in 16-page chunks, and de-LRUs and
frees any resulting zero-count pages in up-to-16 page chunks.
parent 2b341443
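
As an illustration of the pattern, here is a minimal, self-contained userspace sketch of that chunked walk. The names drop_swap_cache(), release_batch(), free_in_chunks(), CHUNK and NR_PAGES are hypothetical stand-ins introduced for this sketch; they mimic the kernel's free_swap_cache(), release_pages() and free_pages_and_swap_cache() from the diff below, with malloc'd blocks standing in for struct page entries on the mmu_gather_t list.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK 16        /* mirrors the 16-page batches used by the patch */
#define NR_PAGES 507    /* same size as the mmu_gather_t page list */

/* Per-page work; in the kernel this is free_swap_cache() dropping the
 * page's swap cache entry. */
static void drop_swap_cache(void *page)
{
        (void)page;
}

/* Batched release; in the kernel this is release_pages(), which takes
 * zone->lru_lock once per batch instead of once per page. */
static void release_batch(void **pages, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                free(pages[i]);
}

/* Walk the array in CHUNK-sized pieces, mirroring free_pages_and_swap_cache(). */
static void free_in_chunks(void **pages, int nr)
{
        void **pagep = pages;

        while (nr) {
                int todo = nr < CHUNK ? nr : CHUNK;
                int i;

                for (i = 0; i < todo; i++)
                        drop_swap_cache(pagep[i]);
                release_batch(pagep, todo);
                pagep += todo;
                nr -= todo;
        }
}

int main(void)
{
        void *pages[NR_PAGES];
        int i;

        for (i = 0; i < NR_PAGES; i++)
                pages[i] = malloc(64);
        free_in_chunks(pages, NR_PAGES);
        printf("released %d pages in %d-page chunks\n", NR_PAGES, CHUNK);
        return 0;
}

Keeping each chunk at 16 pages bounds the temporary pagevec of to-be-freed pages while still amortising the zone->lru_lock round trips over each batch, which is the lock amortisation the changelog refers to.
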
@@ -79,10 +79,8 @@ static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigne
         tlb_flush(tlb);
         nr = tlb->nr;
         if (!tlb_fast_mode(tlb)) {
-                unsigned long i;
+                free_pages_and_swap_cache(tlb->pages, tlb->nr);
                 tlb->nr = 0;
-                for (i=0; i < nr; i++)
-                        free_page_and_swap_cache(tlb->pages[i]);
         }
 }
@@ -24,6 +24,7 @@
 #define page_cache_get(page)           get_page(page)
 #define page_cache_release(page)       put_page(page)
+void release_pages(struct page **pages, int nr);
 
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
@@ -182,6 +182,7 @@ extern int move_to_swap_cache(struct page *page, swp_entry_t entry);
 extern int move_from_swap_cache(struct page *page, unsigned long index,
                struct address_space *mapping);
 extern void free_page_and_swap_cache(struct page *page);
+extern void free_pages_and_swap_cache(struct page **pages, int nr);
 extern struct page * lookup_swap_cache(swp_entry_t);
 extern struct page * read_swap_cache_async(swp_entry_t);
@@ -87,7 +87,7 @@ void __page_cache_release(struct page *page)
 /*
  * Batched page_cache_release(). Decrement the reference count on all the
- * pagevec's pages. If it fell to zero then remove the page from the LRU and
+ * passed pages. If it fell to zero then remove the page from the LRU and
  * free it.
  *
  * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
@@ -96,18 +96,16 @@ void __page_cache_release(struct page *page)
  * The locking in this function is against shrink_cache(): we recheck the
  * page count inside the lock to see whether shrink_cache grabbed the page
  * via the LRU. If it did, give up: shrink_cache will free it.
- *
- * This function reinitialises the caller's pagevec.
  */
-void __pagevec_release(struct pagevec *pvec)
+void release_pages(struct page **pages, int nr)
 {
         int i;
         struct pagevec pages_to_free;
         struct zone *zone = NULL;
 
         pagevec_init(&pages_to_free);
-        for (i = 0; i < pagevec_count(pvec); i++) {
-                struct page *page = pvec->pages[i];
+        for (i = 0; i < nr; i++) {
+                struct page *page = pages[i];
                 struct zone *pagezone;
 
                 if (PageReserved(page) || !put_page_testzero(page))
@@ -122,13 +120,24 @@ void __pagevec_release(struct pagevec *pvec)
                 }
                 if (TestClearPageLRU(page))
                         del_page_from_lru(zone, page);
-                if (page_count(page) == 0)
-                        pagevec_add(&pages_to_free, page);
+                if (page_count(page) == 0) {
+                        if (!pagevec_add(&pages_to_free, page)) {
+                                spin_unlock_irq(&zone->lru_lock);
+                                pagevec_free(&pages_to_free);
+                                pagevec_init(&pages_to_free);
+                                spin_lock_irq(&zone->lru_lock);
+                        }
+                }
         }
         if (zone)
                 spin_unlock_irq(&zone->lru_lock);
 
         pagevec_free(&pages_to_free);
 }
+
+void __pagevec_release(struct pagevec *pvec)
+{
+        release_pages(pvec->pages, pagevec_count(pvec));
+        pagevec_init(pvec);
+}
@@ -292,14 +292,8 @@ int move_from_swap_cache(struct page *page, unsigned long index,
         return err;
 }
 
 /*
- * Perform a free_page(), also freeing any swap cache associated with
- * this page if it is the last user of the page. Can not do a lock_page,
- * as we are holding the page_table_lock spinlock.
- */
-void free_page_and_swap_cache(struct page *page)
-{
-        /*
  * If we are the only user, then try to free up the swap cache.
  *
  * Its ok to check for PageSwapCache without the page lock
@@ -307,13 +301,46 @@ void free_page_and_swap_cache(struct page *page)
  * exclusive_swap_page() _with_ the lock.
  *          - Marcelo
  */
+static inline void free_swap_cache(struct page *page)
+{
         if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                 remove_exclusive_swap_page(page);
                 unlock_page(page);
         }
+}
+
+/*
+ * Perform a free_page(), also freeing any swap cache associated with
+ * this page if it is the last user of the page. Can not do a lock_page,
+ * as we are holding the page_table_lock spinlock.
+ */
+void free_page_and_swap_cache(struct page *page)
+{
+        free_swap_cache(page);
         page_cache_release(page);
 }
+
+/*
+ * Passed an array of pages, drop them all from swapcache and then release
+ * them. They are removed from the LRU and freed if this is their last use.
+ */
+void free_pages_and_swap_cache(struct page **pages, int nr)
+{
+        const int chunk = 16;
+        struct page **pagep = pages;
+
+        while (nr) {
+                int todo = min(chunk, nr);
+                int i;
+
+                for (i = 0; i < todo; i++)
+                        free_swap_cache(pagep[i]);
+                release_pages(pagep, todo);
+                pagep += todo;
+                nr -= todo;
+        }
+}
 
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel