Commit c8b976af authored by Andrew Morton, committed by Linus Torvalds

[PATCH] hugetlb consolidation

From: William Lee Irwin III <wli@holomorphy.com>

The following patch consolidates redundant code in various hugetlb
implementations.  I took the liberty of renaming a few things, since the
code was all moved anyway, and it has the benefit of helping to catch
missed conversions and/or consolidations.
parent 618e7f44
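For reference, a minimal sketch (editorial, not part of the patch) of the pool interface that each architecture below previously duplicated and that now lives once in the generic mm/hugetlb.c added at the end; the names are taken from the patch itself:

/* Consolidated huge page pool, as sketched from mm/hugetlb.c below.
 * Per-node free lists are guarded by a single spinlock, and
 * free_huge_page() is recorded in page->lru.prev so the compound-page
 * free path can locate the pool's destructor without arch hooks.
 */
struct page *alloc_huge_page(void);        /* dequeue a zeroed huge page, refcount 1 */
void free_huge_page(struct page *page);    /* return a huge page to its node's free list */
void huge_page_release(struct page *page); /* drop a reference; free on the last one */
extern unsigned long max_huge_pages;       /* pool size: "hugepages=" or vm.nr_hugepages */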
@@ -20,68 +20,6 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
static long htlbpagemem;
int htlbpage_max;
static long htlbzone_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
static struct page *dequeue_huge_page(void)
{
int nid = numa_node_id();
struct page *page = NULL;
if (list_empty(&hugepage_freelists[nid])) {
for (nid = 0; nid < MAX_NUMNODES; ++nid)
if (!list_empty(&hugepage_freelists[nid]))
break;
}
if (nid >= 0 && nid < MAX_NUMNODES && !list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next, struct page, lru);
list_del(&page->lru);
}
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
nid = (nid + 1) % numnodes;
return page;
}
static void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void)
{
int i;
struct page *page;
spin_lock(&htlbpage_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
htlbpagemem--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
clear_highpage(&page[i]);
return page;
}
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -276,26 +214,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
}
#endif
static void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
spin_unlock(&htlbpage_lock);
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
void unmap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -319,16 +237,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
flush_tlb_range(vma, start, end);
}
void
zap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
@@ -360,7 +268,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
-page = alloc_hugetlb_page();
+page = alloc_huge_page();
if (!page) {
hugetlb_put_quota(mapping);
ret = -ENOMEM;
@@ -380,173 +288,3 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
spin_unlock(&mm->page_table_lock);
return ret;
}
static void update_and_free_page(struct page *page)
{
int j;
struct page *map;
map = page;
htlbzone_pages--;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(map, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
static int try_to_free_low(int count)
{
struct list_head *p;
struct page *page, *map;
map = NULL;
spin_lock(&htlbpage_lock);
/* all lowmem is on node 0 */
list_for_each(p, &hugepage_freelists[0]) {
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
map = NULL;
if (++count == 0)
break;
}
page = list_entry(p, struct page, lru);
if (!PageHighMem(page))
map = page;
}
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
count++;
}
spin_unlock(&htlbpage_lock);
return count;
}
static int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
if (lcount == 0)
return (int)htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
/* Shrink the memory size. */
lcount = try_to_free_low(lcount);
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
update_and_free_page(page);
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
int hugetlb_sysctl_handler(ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
if (!cpu_has_pse)
return -ENODEV;
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
}
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%d", &htlbpage_max) <= 0)
htlbpage_max = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
static int __init hugetlb_init(void)
{
int i;
struct page *page;
if (!cpu_has_pse)
return -ENODEV;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < htlbpage_max; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
spin_unlock(&htlbpage_lock);
}
htlbpage_max = htlbpagemem = htlbzone_pages = i;
printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
return 0;
}
module_init(hugetlb_init);
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"Hugepagesize: %5lu kB\n",
htlbzone_pages,
htlbpagemem,
HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
@@ -22,69 +22,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
static long htlbpagemem;
int htlbpage_max;
static long htlbzone_pages;
unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
static struct page *dequeue_huge_page(void)
{
int nid = numa_node_id();
struct page *page = NULL;
if (list_empty(&hugepage_freelists[nid])) {
for (nid = 0; nid < MAX_NUMNODES; ++nid)
if (!list_empty(&hugepage_freelists[nid]))
break;
}
if (nid >= 0 && nid < MAX_NUMNODES &&
!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next, struct page, lru);
list_del(&page->lru);
}
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
nid = (nid + 1) % numnodes;
return page;
}
void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void)
{
int i;
struct page *page;
spin_lock(&htlbpage_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
htlbpagemem--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
clear_highpage(&page[i]);
return page;
}
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
@@ -244,26 +182,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
return NULL;
}
void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
spin_unlock(&htlbpage_lock);
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
/*
* Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
* are hugetlb region specific.
@@ -339,14 +257,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsig
flush_tlb_range(vma, start, end);
}
void zap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
@@ -378,7 +288,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
-page = alloc_hugetlb_page();
+page = alloc_huge_page();
if (!page) {
hugetlb_put_quota(mapping);
ret = -ENOMEM;
@@ -422,106 +332,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
}
}
void update_and_free_page(struct page *page)
{
int j;
struct page *map;
map = page;
htlbzone_pages--;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(map, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
int try_to_free_low(int count)
{
struct list_head *p;
struct page *page, *map;
map = NULL;
spin_lock(&htlbpage_lock);
list_for_each(p, &hugepage_freelists[0]) {
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
map = NULL;
if (++count == 0)
break;
}
page = list_entry(p, struct page, lru);
if (!PageHighMem(page))
map = page;
}
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
count++;
}
spin_unlock(&htlbpage_lock);
return count;
}
int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
if (lcount == 0)
return (int)htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
/* Shrink the memory size. */
lcount = try_to_free_low(lcount);
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
update_and_free_page(page);
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
int hugetlb_sysctl_handler(ctl_table *table, int write, struct file *file, void *buffer, size_t *length)
{
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
}
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%d", &htlbpage_max) <= 0)
htlbpage_max = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
static int __init hugetlb_setup_sz(char *str)
{
@@ -551,60 +361,3 @@ static int __init hugetlb_setup_sz(char *str)
return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);
static int __init hugetlb_init(void)
{
int i;
struct page *page;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < htlbpage_max; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
spin_unlock(&htlbpage_lock);
}
htlbpage_max = htlbpagemem = htlbzone_pages = i;
printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
return 0;
}
__initcall(hugetlb_init);
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"Hugepagesize: %5lu kB\n",
htlbzone_pages,
htlbpagemem,
HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
if (size > (htlbpagemem << HPAGE_SHIFT))
return 0;
return 1;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
static struct page *hugetlb_nopage(struct vm_area_struct *area, unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
@@ -29,65 +29,6 @@
#include <linux/sysctl.h>
int htlbpage_max;
/* This lock protects the two counters and list below */
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
static int htlbpage_free; /* = 0 */
static int htlbpage_total; /* = 0 */
static struct list_head hugepage_freelists[MAX_NUMNODES];
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
/* XXX make this a sysctl */
unsigned long largepage_roundrobin = 1;
static struct page *dequeue_huge_page(void)
{
static int nid = 0;
struct page *page = NULL;
int i;
if (!largepage_roundrobin)
nid = numa_node_id();
for (i = 0; i < numnodes; i++) {
if (!list_empty(&hugepage_freelists[nid]))
break;
nid = (nid + 1) % numnodes;
}
if (!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next, struct page, lru);
list_del(&page->lru);
}
if (largepage_roundrobin)
nid = (nid + 1) % numnodes;
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
if (!page)
return NULL;
nid = page_zone(page)->zone_pgdat->node_id;
nid = (nid + 1) % numnodes;
return page;
}
/* HugePTE layout:
*
* 31 30 ... 15 14 13 12 10 9 8 7 6 5 4 3 2 1 0
@@ -119,7 +60,6 @@ typedef struct {unsigned int val;} hugepte_t;
#define hugepte_none(x) (!(hugepte_val(x) & _HUGEPAGE_PFN))
-static void free_huge_page(struct page *page);
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
hugepte_t pte, int local);
@@ -146,27 +86,6 @@ static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}
static struct page *alloc_hugetlb_page(void)
{
int i;
struct page *page;
spin_lock(&htlbpage_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
htlbpage_free--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
clear_highpage(&page[i]);
return page;
}
static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -448,26 +367,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return page;
}
static void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpage_free++;
spin_unlock(&htlbpage_lock);
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
void unmap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -510,16 +409,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
mm->rss -= (end - start) >> PAGE_SHIFT;
}
void zap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
@@ -554,7 +443,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
-page = alloc_hugetlb_page();
+page = alloc_huge_page();
if (!page) {
hugetlb_put_quota(mapping);
ret = -ENOMEM;
@@ -876,148 +765,3 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
ppc_md.hpte_invalidate(slot, va, 1, local);
}
static void split_and_free_hugepage(struct page *page)
{
int j;
struct page *map;
map = page;
htlbpage_total--;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(map, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
return 0;
if (count < 0)
lcount = count;
else
lcount = count - htlbpage_total;
if (lcount == 0)
return htlbpage_total;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpage_free++;
htlbpage_total++;
spin_unlock(&htlbpage_lock);
}
return htlbpage_total;
}
/* Shrink the memory size. */
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
split_and_free_hugepage(page);
spin_unlock(&htlbpage_lock);
}
return htlbpage_total;
}
int hugetlb_sysctl_handler(ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
}
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%d", &htlbpage_max) <= 0)
htlbpage_max = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
static int __init hugetlb_init(void)
{
int i;
struct page *page;
if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < htlbpage_max; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
spin_unlock(&htlbpage_lock);
}
htlbpage_max = htlbpage_free = htlbpage_total = i;
printk(KERN_INFO "Total HugeTLB memory allocated, %d\n",
htlbpage_free);
} else {
htlbpage_max = 0;
printk(KERN_INFO "CPU does not support HugeTLB\n");
}
return 0;
}
module_init(hugetlb_init);
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5d\n"
"HugePages_Free: %5d\n"
"Hugepagesize: %5lu kB\n",
htlbpage_total,
htlbpage_free,
HPAGE_SIZE/1024);
}
/* This is advisory only, so we can get away with accessing
* htlbpage_free without taking the lock. */
int is_hugepage_mem_enough(size_t size)
{
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpage_free;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return htlbpage_total * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
@@ -24,68 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
static long htlbpagemem;
int htlbpage_max;
static long htlbzone_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
static struct page *dequeue_huge_page(void)
{
int nid = numa_node_id();
struct page *page = NULL;
if (list_empty(&hugepage_freelists[nid])) {
for (nid = 0; nid < MAX_NUMNODES; ++nid)
if (!list_empty(&hugepage_freelists[nid]))
break;
}
if (nid >= 0 && nid < MAX_NUMNODES &&
!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
list_del(&page->lru);
}
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
nid = (nid + 1) % numnodes;
return page;
}
static void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void)
{
struct page *page;
spin_lock(&htlbpage_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
htlbpagemem--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
memset(page_address(page), 0, HPAGE_SIZE);
return page;
}
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -250,25 +188,6 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
static void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
BUG_ON(page->mapping);
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
spin_unlock(&htlbpage_lock);
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
void unmap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -297,16 +216,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
flush_tlb_range(vma, start, end);
}
void zap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
@@ -338,7 +247,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
-page = alloc_hugetlb_page();
+page = alloc_huge_page();
if (!page) {
hugetlb_put_quota(mapping);
ret = -ENOMEM;
@@ -358,168 +267,3 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
spin_unlock(&mm->page_table_lock);
return ret;
}
static void update_and_free_page(struct page *page)
{
int j;
struct page *map;
map = page;
htlbzone_pages--;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(map, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
static int try_to_free_low(int count)
{
struct list_head *p;
struct page *page, *map;
map = NULL;
spin_lock(&htlbpage_lock);
/* all lowmem is on node 0 */
list_for_each(p, &hugepage_freelists[0]) {
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
map = NULL;
if (++count == 0)
break;
}
page = list_entry(p, struct page, lru);
if (!PageHighMem(page))
map = page;
}
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
count++;
}
spin_unlock(&htlbpage_lock);
return count;
}
static int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
if (lcount == 0)
return (int)htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
/* Shrink the memory size. */
lcount = try_to_free_low(lcount);
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
update_and_free_page(page);
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
}
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%d", &htlbpage_max) <= 0)
htlbpage_max = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
static int __init hugetlb_init(void)
{
int i;
struct page *page;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < htlbpage_max; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
spin_unlock(&htlbpage_lock);
}
htlbpage_max = htlbpagemem = htlbzone_pages = i;
printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
return 0;
}
module_init(hugetlb_init);
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"Hugepagesize: %5lu kB\n",
htlbzone_pages,
htlbpagemem,
HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
@@ -21,68 +21,6 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
static long htlbpagemem;
int htlbpage_max;
static long htlbzone_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
static struct page *dequeue_huge_page(void)
{
int nid = numa_node_id();
struct page *page = NULL;
if (list_empty(&hugepage_freelists[nid])) {
for (nid = 0; nid < MAX_NUMNODES; ++nid)
if (!list_empty(&hugepage_freelists[nid]))
break;
}
if (nid >= 0 && nid < MAX_NUMNODES &&
!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
list_del(&page->lru);
}
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
nid = (nid + 1) % numnodes;
return page;
}
static void free_huge_page(struct page *page);
static struct page *alloc_hugetlb_page(void)
{
struct page *page;
spin_lock(&htlbpage_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&htlbpage_lock);
return NULL;
}
htlbpagemem--;
spin_unlock(&htlbpage_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
memset(page_address(page), 0, HPAGE_SIZE);
return page;
}
static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -247,26 +185,6 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
static void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
spin_unlock(&htlbpage_lock);
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
void unmap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
@@ -295,16 +213,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
flush_tlb_range(vma, start, end);
}
void zap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
struct mm_struct *mm = current->mm;
@@ -336,7 +244,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
ret = -ENOMEM;
goto out;
}
-page = alloc_hugetlb_page();
+page = alloc_huge_page();
if (!page) {
hugetlb_put_quota(mapping);
ret = -ENOMEM;
@@ -356,168 +264,3 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
spin_unlock(&mm->page_table_lock);
return ret;
}
} }
static void update_and_free_page(struct page *page)
{
int j;
struct page *map;
map = page;
htlbzone_pages--;
for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(map, 0);
map++;
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
static int try_to_free_low(int count)
{
struct list_head *p;
struct page *page, *map;
map = NULL;
spin_lock(&htlbpage_lock);
/* all lowmem is on node 0 */
list_for_each(p, &hugepage_freelists[0]) {
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
map = NULL;
if (++count == 0)
break;
}
page = list_entry(p, struct page, lru);
if (!PageHighMem(page))
map = page;
}
if (map) {
list_del(&map->lru);
update_and_free_page(map);
htlbpagemem--;
count++;
}
spin_unlock(&htlbpage_lock);
return count;
}
static int set_hugetlb_mem_size(int count)
{
int lcount;
struct page *page;
if (count < 0)
lcount = count;
else
lcount = count - htlbzone_pages;
if (lcount == 0)
return (int)htlbzone_pages;
if (lcount > 0) { /* Increase the mem size. */
while (lcount--) {
page = alloc_fresh_huge_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
htlbpagemem++;
htlbzone_pages++;
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
/* Shrink the memory size. */
lcount = try_to_free_low(lcount);
while (lcount++) {
page = alloc_hugetlb_page();
if (page == NULL)
break;
spin_lock(&htlbpage_lock);
update_and_free_page(page);
spin_unlock(&htlbpage_lock);
}
return (int) htlbzone_pages;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
proc_dointvec(table, write, file, buffer, length);
htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
return 0;
}
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%d", &htlbpage_max) <= 0)
htlbpage_max = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
static int __init hugetlb_init(void)
{
int i;
struct page *page;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < htlbpage_max; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&htlbpage_lock);
enqueue_huge_page(page);
spin_unlock(&htlbpage_lock);
}
htlbpage_max = htlbpagemem = htlbzone_pages = i;
printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
return 0;
}
module_init(hugetlb_init);
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"Hugepagesize: %5lu kB\n",
htlbzone_pages,
htlbpagemem,
HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= htlbpagemem;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return htlbzone_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
@@ -573,7 +573,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
unsigned long long size = memparse(value, &rest);
if (*rest == '%') {
size <<= HPAGE_SHIFT;
-size *= htlbpage_max;
+size *= max_huge_pages;
do_div(size, 100);
rest++;
}
...
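A worked example of the percentage branch above (assuming 4 MB huge pages, i.e. HPAGE_SHIFT == 22, and a pool of hugepages=100): "size=50%" parses to 50, becomes 50 << 22 = 200 MB, is multiplied by max_huge_pages (100) and divided by 100 by do_div(), yielding 200 MB, i.e. half of the 400 MB pool.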
@@ -28,8 +28,11 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write);
int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
int pmd_huge(pmd_t pmd);
+struct page *alloc_huge_page(void);
+void free_huge_page(struct page *);
-extern int htlbpage_max;
+extern unsigned long max_huge_pages;
+extern const unsigned long hugetlb_zero, hugetlb_infinity;
static inline void
mark_mm_hugetlb(struct mm_struct *mm, struct vm_area_struct *vma)
@@ -78,6 +81,8 @@ static inline unsigned long hugetlb_total_pages(void)
#define pmd_huge(x) 0
#define is_hugepage_only_range(addr, len) 0
#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
+#define alloc_huge_page() ({ NULL; })
+#define free_huge_page(p) ({ (void)(p); BUG(); })
#ifndef HPAGE_MASK
#define HPAGE_MASK 0 /* Keep the compiler happy */
...
@@ -710,10 +710,12 @@ static ctl_table vm_table[] = {
{
.ctl_name = VM_HUGETLB_PAGES,
.procname = "nr_hugepages",
-.data = &htlbpage_max,
-.maxlen = sizeof(int),
+.data = &max_huge_pages,
+.maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = &hugetlb_sysctl_handler,
+.extra1 = (void *)&hugetlb_zero,
+.extra2 = (void *)&hugetlb_infinity,
},
#endif
{
...
@@ -12,3 +12,4 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
slab.o swap.o truncate.o vmscan.o $(mmu-y)
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
+obj-$(CONFIG_HUGETLBFS) += hugetlb.o
/*
* Generic hugetlb support.
* (C) William Irwin, April 2004
*/
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/sysctl.h>
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;
static void enqueue_huge_page(struct page *page)
{
list_add(&page->lru,
&hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
}
static struct page *dequeue_huge_page(void)
{
int nid = numa_node_id();
struct page *page = NULL;
if (list_empty(&hugepage_freelists[nid])) {
for (nid = 0; nid < MAX_NUMNODES; ++nid)
if (!list_empty(&hugepage_freelists[nid]))
break;
}
if (nid >= 0 && nid < MAX_NUMNODES &&
!list_empty(&hugepage_freelists[nid])) {
page = list_entry(hugepage_freelists[nid].next,
struct page, lru);
list_del(&page->lru);
}
return page;
}
static struct page *alloc_fresh_huge_page(void)
{
static int nid = 0;
struct page *page;
page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
HUGETLB_PAGE_ORDER);
nid = (nid + 1) % numnodes;
return page;
}
void free_huge_page(struct page *page)
{
BUG_ON(page_count(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&hugetlb_lock);
enqueue_huge_page(page);
free_huge_pages++;
spin_unlock(&hugetlb_lock);
}
struct page *alloc_huge_page(void)
{
struct page *page;
spin_lock(&hugetlb_lock);
page = dequeue_huge_page();
if (!page) {
spin_unlock(&hugetlb_lock);
return NULL;
}
free_huge_pages--;
spin_unlock(&hugetlb_lock);
set_page_count(page, 1);
page->lru.prev = (void *)free_huge_page;
memset(page_address(page), 0, HPAGE_SIZE);
return page;
}
void huge_page_release(struct page *page)
{
if (!put_page_testzero(page))
return;
free_huge_page(page);
}
static int __init hugetlb_init(void)
{
unsigned long i;
struct page *page;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&hugepage_freelists[i]);
for (i = 0; i < max_huge_pages; ++i) {
page = alloc_fresh_huge_page();
if (!page)
break;
spin_lock(&hugetlb_lock);
enqueue_huge_page(page);
spin_unlock(&hugetlb_lock);
}
max_huge_pages = free_huge_pages = nr_huge_pages = i;
printk("Total HugeTLB memory allocated, %lu\n", free_huge_pages);
return 0;
}
module_init(hugetlb_init);
static int __init hugetlb_setup(char *s)
{
if (sscanf(s, "%lu", &max_huge_pages) <= 0)
max_huge_pages = 0;
return 1;
}
__setup("hugepages=", hugetlb_setup);
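/* Editorial example, not patch code: booting with "hugepages=64" makes
 * hugetlb_setup() set max_huge_pages = 64, and hugetlb_init() then
 * pre-allocates that many huge pages into the per-node free lists; the
 * pool can later be resized through /proc/sys/vm/nr_hugepages.
 */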
static void update_and_free_page(struct page *page)
{
int i;
nr_huge_pages--;
for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
set_page_count(&page[i], 0);
}
set_page_count(page, 1);
__free_pages(page, HUGETLB_PAGE_ORDER);
}
#ifdef CONFIG_HIGHMEM
static int try_to_free_low(unsigned long count)
{
int i;
for (i = 0; i < MAX_NUMNODES; ++i) {
struct page *page;
list_for_each_entry(page, &hugepage_freelists[i], lru) {
if (PageHighMem(page))
continue;
list_del(&page->lru);
update_and_free_page(page);
--free_huge_pages;
if (!--count)
return 0;
}
}
return count;
}
#else
static inline int try_to_free_low(unsigned long count)
{
return count;
}
#endif
static unsigned long set_max_huge_pages(unsigned long count)
{
while (count > nr_huge_pages) {
struct page *page = alloc_fresh_huge_page();
if (!page)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
enqueue_huge_page(page);
free_huge_pages++;
nr_huge_pages++;
spin_unlock(&hugetlb_lock);
}
if (count >= nr_huge_pages)
return nr_huge_pages;
spin_lock(&hugetlb_lock);
for (count = try_to_free_low(count); count < nr_huge_pages; --free_huge_pages) {
struct page *page = dequeue_huge_page();
if (!page)
break;
update_and_free_page(page);
}
spin_unlock(&hugetlb_lock);
return nr_huge_pages;
}
#ifdef CONFIG_SYSCTL
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
struct file *file, void *buffer, size_t *length)
{
proc_doulongvec_minmax(table, write, file, buffer, length);
max_huge_pages = set_max_huge_pages(max_huge_pages);
return 0;
}
#endif /* CONFIG_SYSCTL */
int hugetlb_report_meminfo(char *buf)
{
return sprintf(buf,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"Hugepagesize: %5lu kB\n",
nr_huge_pages,
free_huge_pages,
HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}
EXPORT_SYMBOL(hugetlb_total_pages);
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
unsigned long address, int *unused)
{
BUG();
return NULL;
}
struct vm_operations_struct hugetlb_vm_ops = {
.nopage = hugetlb_nopage,
};
void zap_hugepage_range(struct vm_area_struct *vma,
unsigned long start, unsigned long length)
{
struct mm_struct *mm = vma->vm_mm;
spin_lock(&mm->page_table_lock);
unmap_hugepage_range(vma, start, start + length);
spin_unlock(&mm->page_table_lock);
}
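A usage note (editorial, not part of the patch): the pool is resized at runtime through the nr_hugepages sysctl wired into vm_table earlier. The path, in this patch's own terms, is roughly:

/* echo N > /proc/sys/vm/nr_hugepages
 *   -> hugetlb_sysctl_handler()
 *        -> proc_doulongvec_minmax()      (bounded by hugetlb_zero and
 *                                          hugetlb_infinity via .extra1/.extra2)
 *        -> set_max_huge_pages(max_huge_pages)
 *             grow:   alloc_fresh_huge_page() + enqueue_huge_page()
 *             shrink: try_to_free_low() frees lowmem pages first, then
 *                     dequeue_huge_page() + update_and_free_page()
 */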