Commit 6d576c06 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] hugepage fixes

From: William Lee Irwin III <wli@holomorphy.com>

mm/hugetlb.c is putting the destructor in head->lru.prev, not head[1].mapping;
fix below, along with nuking huge_page_release(), which simply duplicates
put_page().
parent 65ea1aff
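
For context on why the destructor's location matters: in this era of the kernel, put_page() special-cased compound pages and invoked a destructor fetched from the first tail page's ->mapping field, i.e. head[1].mapping. With the pointer stashed in head->lru.prev instead, that dispatch found garbage; and since put_page() already did the drop-refcount-then-destruct dance, huge_page_release() added nothing. The sketch below is a reconstruction from memory of the 2.6-era mm/swap.c, not verbatim from this tree:

/*
 * Approximate 2.6-era put_page() (reconstruction, not verbatim).
 */
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page))) {
		/* Every page of a compound page points at the head via ->private. */
		page = (struct page *)page->private;
		if (put_page_testzero(page)) {
			/*
			 * The destructor lives in head[1].mapping -- the slot
			 * this patch makes alloc_huge_page() actually use.
			 */
			void (*dtor)(struct page *);

			dtor = (void (*)(struct page *))page[1].mapping;
			(*dtor)(page);
		}
		return;
	}
	if (!PageReserved(page) && put_page_testzero(page))
		__page_cache_release(page);
}
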
@@ -220,7 +220,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 		if (pte_none(pte))
 			continue;
 		page = pte_page(pte);
-		huge_page_release(page);
+		put_page(page);
 	}
 	mm->rss -= (end - start) >> PAGE_SHIFT;
 	flush_tlb_range(vma, start, end);
...
@@ -248,7 +248,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsig
 		if (pte_none(*pte))
 			continue;
 		page = pte_page(*pte);
-		huge_page_release(page);
+		put_page(page);
 		pte_clear(pte);
 	}
 	mm->rss -= (end - start) >> PAGE_SHIFT;
...
@@ -394,7 +394,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 			flush_hash_hugepage(mm->context, addr,
 					    pte, local);
-		huge_page_release(page);
+		put_page(page);
 	}
 	mm->rss -= (end - start) >> PAGE_SHIFT;
...
@@ -200,7 +200,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 		if (pte_none(*pte))
 			continue;
 		page = pte_page(*pte);
-		huge_page_release(page);
+		put_page(page);
 		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 			pte_clear(pte);
 			pte++;
...
@@ -196,7 +196,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 		if (pte_none(*pte))
 			continue;
 		page = pte_page(*pte);
-		huge_page_release(page);
+		put_page(page);
 		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 			pte_clear(pte);
 			pte++;
...
@@ -142,7 +142,7 @@ void huge_pagevec_release(struct pagevec *pvec)
 	int i;

 	for (i = 0; i < pagevec_count(pvec); ++i)
-		huge_page_release(pvec->pages[i]);
+		put_page(pvec->pages[i]);
 	pagevec_reinit(pvec);
 }
@@ -152,7 +152,7 @@ void truncate_huge_page(struct page *page)
 	clear_page_dirty(page);
 	ClearPageUptodate(page);
 	remove_from_page_cache(page);
-	huge_page_release(page);
+	put_page(page);
 }

 void truncate_hugepages(struct address_space *mapping, loff_t lstart)
...
@@ -16,7 +16,6 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page
 void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
-void huge_page_release(struct page *);
 int hugetlb_report_meminfo(char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
@@ -68,7 +67,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_prefault(mapping, vma)	({ BUG(); 0; })
 #define zap_hugepage_range(vma, start, len)	BUG()
 #define unmap_hugepage_range(vma, start, end)	BUG()
-#define huge_page_release(page)	BUG()
 #define is_hugepage_mem_enough(size)	0
 #define hugetlb_report_meminfo(buf)	0
 #define mark_mm_hugetlb(mm, vma)	do { } while (0)
...
@@ -78,20 +78,12 @@ struct page *alloc_huge_page(void)
 	free_huge_pages--;
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
-	page->lru.prev = (void *)free_huge_page;
+	page[1].mapping = (void *)free_huge_page;
 	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
 		clear_highpage(&page[i]);
 	return page;
 }

-void huge_page_release(struct page *page)
-{
-	if (!put_page_testzero(page))
-		return;
-	free_huge_page(page);
-}
-
 static int __init hugetlb_init(void)
 {
 	unsigned long i;
...
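
As for why head[1].mapping is the slot to use: the page allocator reserves fields in the first tail page when it builds a compound page, leaving head[1].mapping free to carry a destructor. Roughly, as a reconstruction of the era's prep_compound_page() in mm/page_alloc.c (approximate, not verbatim from this tree):

/* Approximate 2.6-era prep_compound_page() (reconstruction, not verbatim). */
static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].mapping = NULL;	/* slot later claimed for the destructor */
	page[1].index = order;	/* order stashed in the first tail page */
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		p->private = (unsigned long)page;	/* all pages point at the head */
	}
}
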