Commit 5ea74d2d authored by Kenneth W. Chen, committed by Linus Torvalds

[PATCH] per node huge page stats in sysfs

Add per-node huge page statistics in sysfs: each NUMA node's meminfo file now reports that node's total and free huge page counts.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent ed51aeb3
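
In short: drivers/base/node.c used to carry a weak no-op hugetlb_report_node_meminfo() that architecture code could override. The patch removes that stub, declares the function in <linux/hugetlb.h> (with a 0-returning macro for !CONFIG_HUGETLB_PAGE builds), and implements it once in mm/hugetlb.c on top of new per-node counters. A minimal sketch of the consumer, assuming the usual node_read_meminfo() shape of that era (the body shown here is abridged and is not part of this diff):

/* Sketch only (abridged): how drivers/base/node.c consumes the new hook.
 * node_read_meminfo() formats the node's counters into buf, then appends
 * the per-node hugepage lines now produced by mm/hugetlb.c.
 */
static ssize_t node_read_meminfo(struct sys_device *dev, char *buf)
{
	int nid = dev->id;
	int n = 0;

	/* ... sprintf() the node's MemTotal/MemFree/etc. into buf ... */
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}
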
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -7,6 +7,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/node.h>
+#include <linux/hugetlb.h>
 #include <linux/cpumask.h>
 #include <linux/topology.h>
@@ -31,12 +32,6 @@ static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
 static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumap, NULL);
 
-/* Can be overwritten by architecture specific code. */
-int __attribute__((weak)) hugetlb_report_node_meminfo(int node, char *buf)
-{
-	return 0;
-}
-
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 {
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -19,6 +19,7 @@ void zap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
+int hugetlb_report_node_meminfo(int, char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -72,6 +73,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define unmap_hugepage_range(vma, start, end) BUG()
 #define is_hugepage_mem_enough(size) 0
 #define hugetlb_report_meminfo(buf) 0
+#define hugetlb_report_node_meminfo(n, buf) 0
 #define mark_mm_hugetlb(mm, vma) do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write) 0
 #define is_aligned_hugepage_range(addr, len) 0
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -15,12 +15,16 @@ const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages;
 unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
+static unsigned int nr_huge_pages_node[MAX_NUMNODES];
+static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;
 
 static void enqueue_huge_page(struct page *page)
 {
-	list_add(&page->lru,
-		 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+	int nid = page_zone(page)->zone_pgdat->node_id;
+	list_add(&page->lru, &hugepage_freelists[nid]);
+	free_huge_pages++;
+	free_huge_pages_node[nid]++;
 }
 
 static struct page *dequeue_huge_page(void)
@@ -38,6 +42,8 @@ static struct page *dequeue_huge_page(void)
 		page = list_entry(hugepage_freelists[nid].next,
 			  struct page, lru);
 		list_del(&page->lru);
+		free_huge_pages--;
+		free_huge_pages_node[nid]--;
 	}
 	return page;
 }
@@ -49,6 +55,10 @@ static struct page *alloc_fresh_huge_page(void)
 	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
 			HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % numnodes;
+	if (page) {
+		nr_huge_pages++;
+		nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]++;
+	}
 	return page;
 }
@@ -61,7 +71,6 @@
 	spin_lock(&hugetlb_lock);
 	enqueue_huge_page(page);
-	free_huge_pages++;
 	spin_unlock(&hugetlb_lock);
 }
@@ -76,7 +85,6 @@ struct page *alloc_huge_page(void)
 		spin_unlock(&hugetlb_lock);
 		return NULL;
 	}
-	free_huge_pages--;
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
 	page[1].mapping = (void *)free_huge_page;
@@ -119,6 +127,7 @@ static void update_and_free_page(struct page *page)
 {
 	int i;
 	nr_huge_pages--;
+	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
 	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
@@ -132,7 +141,7 @@ static void update_and_free_page(struct page *page)
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(unsigned long count)
 {
-	int i;
+	int i, nid;
 	for (i = 0; i < MAX_NUMNODES; ++i) {
 		struct page *page, *next;
 		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
@@ -140,7 +149,9 @@ static void try_to_free_low(unsigned long count)
 				continue;
 			list_del(&page->lru);
 			update_and_free_page(page);
-			--free_huge_pages;
+			nid = page_zone(page)->zone_pgdat->node_id;
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
 			if (count >= nr_huge_pages)
 				return;
 		}
@@ -160,8 +171,6 @@ static unsigned long set_max_huge_pages(unsigned long count)
 			return nr_huge_pages;
 		spin_lock(&hugetlb_lock);
 		enqueue_huge_page(page);
-		free_huge_pages++;
-		nr_huge_pages++;
 		spin_unlock(&hugetlb_lock);
 	}
 	if (count >= nr_huge_pages)
@@ -169,7 +178,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
 	spin_lock(&hugetlb_lock);
 	try_to_free_low(count);
-	for (; count < nr_huge_pages; --free_huge_pages) {
+	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page();
 		if (!page)
 			break;
@@ -201,6 +210,15 @@ int hugetlb_report_meminfo(char *buf)
 			HPAGE_SIZE/1024);
 }
 
+int hugetlb_report_node_meminfo(int nid, char *buf)
+{
+	return sprintf(buf,
+		"Node %d HugePages_Total: %5u\n"
+		"Node %d HugePages_Free:  %5u\n",
+		nid, nr_huge_pages_node[nid],
+		nid, free_huge_pages_node[nid]);
+}
+
 int is_hugepage_mem_enough(size_t size)
 {
 	return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
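Net effect: reading a node's meminfo attribute (conventionally /sys/devices/system/node/node0/meminfo) now ends with two extra lines in the format fixed by the sprintf() above, e.g. "Node 0 HugePages_Total:     4". A small user-space sketch that pulls out just those lines; the path and printed values are illustrative:

/* Hypothetical user-space check for the new per-node hugepage lines.
 * Compile with any C compiler; adjust the node number in the path.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/node/node0/meminfo", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "HugePages"))
			fputs(line, stdout); /* e.g. "Node 0 HugePages_Free:     2" */
	fclose(f);
	return 0;
}

A design note visible in the diff: the global free_huge_pages updates move into enqueue_huge_page()/dequeue_huge_page(), so the global and per-node counters are always adjusted together under hugetlb_lock and the callers' ad-hoc increments and decrements can be dropped.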