Commit c1ab3459 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] show_free_areas() cleanup

Cleanup to show_free_areas() from Bill Irwin:

show_free_areas() and show_free_areas_core() are a mess.
(1) it uses a bizarre and ugly form of list iteration to walk buddy lists
        use standard list functions instead
(2) it prints the same information repeatedly once per-node
        rationalize the braindamaged iteration logic
(3) show_free_areas_node() is useless and not called anywhere
        remove it entirely
(4) show_free_areas() itself just calls show_free_areas_core()
        remove show_free_areas_core() and do the stuff directly
(5) SWAP_CACHE_INFO is always #defined, remove it
(6) INC_CACHE_INFO() doesn't use the do { } while (0) construct

This patch also includes Matthew Dobson's patch which removes
mm/numa.c:node_lock.  The consensus is that it doesn't do anything now
that show_free_areas_node() isn't there.
parent cbb6e8ec
...@@ -327,7 +327,6 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num) ...@@ -327,7 +327,6 @@ static inline void set_page_zone(struct page *page, unsigned long zone_num)
extern struct page *mem_map; extern struct page *mem_map;
extern void show_free_areas(void); extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);
extern int fail_writepage(struct page *); extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused); struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
......
...@@ -176,10 +176,7 @@ int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page); ...@@ -176,10 +176,7 @@ int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page);
/* linux/mm/page_alloc.c */ /* linux/mm/page_alloc.c */
/* linux/mm/swap_state.c */ /* linux/mm/swap_state.c */
#define SWAP_CACHE_INFO
#ifdef SWAP_CACHE_INFO
extern void show_swap_cache_info(void); extern void show_swap_cache_info(void);
#endif
extern int add_to_swap_cache(struct page *, swp_entry_t); extern int add_to_swap_cache(struct page *, swp_entry_t);
extern int add_to_swap(struct page *); extern int add_to_swap(struct page *);
extern void __delete_from_swap_cache(struct page *page); extern void __delete_from_swap_cache(struct page *page);
......
...@@ -44,17 +44,6 @@ struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int orde ...@@ -44,17 +44,6 @@ struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int orde
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
/*
 * show_free_areas_node - print free-area statistics for a single node.
 *
 * Thin wrapper that takes node_lock with IRQs disabled around
 * show_free_areas_core() for the given node.
 *
 * NOTE(review): this function is shown here as the *removed* side of the
 * diff — per the commit message it is dead code (never called anywhere)
 * and node_lock only existed to serialize it, so both are deleted.
 */
void show_free_areas_node(pg_data_t *pgdat)
{
unsigned long flags;
/* IRQ-safe lock: presumably callable from any context — node_lock is defined above in this file */
spin_lock_irqsave(&node_lock, flags);
show_free_areas_core(pgdat);
spin_unlock_irqrestore(&node_lock, flags);
}
/* /*
* Nodes can be initialized parallely, in no particular order. * Nodes can be initialized parallely, in no particular order.
*/ */
...@@ -106,11 +95,10 @@ struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order) ...@@ -106,11 +95,10 @@ struct page * _alloc_pages(unsigned int gfp_mask, unsigned int order)
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
temp = NODE_DATA(numa_node_id()); temp = NODE_DATA(numa_node_id());
#else #else
spin_lock_irqsave(&node_lock, flags); if (!next)
if (!next) next = pgdat_list; next = pgdat_list;
temp = next; temp = next;
next = next->node_next; next = next->node_next;
spin_unlock_irqrestore(&node_lock, flags);
#endif #endif
start = temp; start = temp;
while (temp) { while (temp) {
......
...@@ -601,12 +601,11 @@ void si_meminfo(struct sysinfo *val) ...@@ -601,12 +601,11 @@ void si_meminfo(struct sysinfo *val)
* We also calculate the percentage fragmentation. We do this by counting the * We also calculate the percentage fragmentation. We do this by counting the
* memory on each free list with the exception of the first item on the list. * memory on each free list with the exception of the first item on the list.
*/ */
void show_free_areas_core(pg_data_t *pgdat) void show_free_areas(void)
{ {
unsigned int order; pg_data_t *pgdat;
unsigned type;
pg_data_t *tmpdat = pgdat;
struct page_state ps; struct page_state ps;
int type;
get_page_state(&ps); get_page_state(&ps);
...@@ -614,20 +613,20 @@ void show_free_areas_core(pg_data_t *pgdat) ...@@ -614,20 +613,20 @@ void show_free_areas_core(pg_data_t *pgdat)
K(nr_free_pages()), K(nr_free_pages()),
K(nr_free_highpages())); K(nr_free_highpages()));
while (tmpdat) { for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
zone_t *zone; for (type = 0; type < MAX_NR_ZONES; ++type) {
for (zone = tmpdat->node_zones; zone_t *zone = &pgdat->node_zones[type];
zone < tmpdat->node_zones + MAX_NR_ZONES; zone++) printk("Zone:%s "
printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB " "freepages:%6lukB "
"high:%6lukB\n", "min:%6lukB "
zone->name, "low:%6lukB "
K(zone->free_pages), "high:%6lukB\n",
K(zone->pages_min), zone->name,
K(zone->pages_low), K(zone->free_pages),
K(zone->pages_high)); K(zone->pages_min),
K(zone->pages_low),
tmpdat = tmpdat->node_next; K(zone->pages_high));
} }
printk("( Active:%lu inactive:%lu dirty:%lu writeback:%lu free:%u )\n", printk("( Active:%lu inactive:%lu dirty:%lu writeback:%lu free:%u )\n",
ps.nr_active, ps.nr_active,
...@@ -636,40 +635,28 @@ void show_free_areas_core(pg_data_t *pgdat) ...@@ -636,40 +635,28 @@ void show_free_areas_core(pg_data_t *pgdat)
ps.nr_writeback, ps.nr_writeback,
nr_free_pages()); nr_free_pages());
for (type = 0; type < MAX_NR_ZONES; type++) { for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
struct list_head *head, *curr; for (type = 0; type < MAX_NR_ZONES; type++) {
zone_t *zone = pgdat->node_zones + type; list_t *elem;
unsigned long nr, total, flags; zone_t *zone = &pgdat->node_zones[type];
unsigned long nr, flags, order, total = 0;
if (!zone->size)
continue;
total = 0;
if (zone->size) {
spin_lock_irqsave(&zone->lock, flags); spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) { for (order = 0; order < MAX_ORDER; order++) {
head = &(zone->free_area + order)->free_list;
curr = head;
nr = 0; nr = 0;
for (;;) { list_for_each(elem, &zone->free_area[order].free_list)
curr = curr->next; ++nr;
if (curr == head) total += nr << order;
break;
nr++;
}
total += nr * (1 << order);
printk("%lu*%lukB ", nr, K(1UL) << order); printk("%lu*%lukB ", nr, K(1UL) << order);
} }
spin_unlock_irqrestore(&zone->lock, flags); spin_unlock_irqrestore(&zone->lock, flags);
printk("= %lukB)\n", K(total));
} }
printk("= %lukB)\n", K(total));
}
#ifdef SWAP_CACHE_INFO
show_swap_cache_info(); show_swap_cache_info();
#endif
}
void show_free_areas(void)
{
show_free_areas_core(pgdat_list);
} }
/* /*
......
...@@ -42,8 +42,7 @@ struct address_space swapper_space = { ...@@ -42,8 +42,7 @@ struct address_space swapper_space = {
private_list: LIST_HEAD_INIT(swapper_space.private_list), private_list: LIST_HEAD_INIT(swapper_space.private_list),
}; };
#ifdef SWAP_CACHE_INFO #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
#define INC_CACHE_INFO(x) (swap_cache_info.x++)
static struct { static struct {
unsigned long add_total; unsigned long add_total;
...@@ -61,9 +60,6 @@ void show_swap_cache_info(void) ...@@ -61,9 +60,6 @@ void show_swap_cache_info(void)
swap_cache_info.find_success, swap_cache_info.find_total, swap_cache_info.find_success, swap_cache_info.find_total,
swap_cache_info.noent_race, swap_cache_info.exist_race); swap_cache_info.noent_race, swap_cache_info.exist_race);
} }
#else
#define INC_CACHE_INFO(x) do { } while (0)
#endif
int add_to_swap_cache(struct page *page, swp_entry_t entry) int add_to_swap_cache(struct page *page, swp_entry_t entry)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment