Commit 02b694de authored by Yasunori Goto, committed by Linus Torvalds

[PATCH] wait_table and zonelist initializing for memory hotadd: change name of wait_table_size()

This just renames wait_table_size() to wait_table_hash_nr_entries().
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3c5a87f4
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -197,7 +197,7 @@ struct zone {
 	/*
 	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
 	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
 	 *
 	 * The purpose of all these is to keep track of the people
@@ -220,7 +220,7 @@ struct zone {
 	 * free_area_init_core() performs the initialization of them.
 	 */
 	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
+	unsigned long		wait_table_hash_nr_entries;
 	unsigned long		wait_table_bits;
 
 	/*
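For background on the fields above: the renamed field and wait_table_bits are tied by the invariant wait_table_hash_nr_entries == (1 << wait_table_bits), which lets a waiter pick its queue by hashing the page pointer down to wait_table_bits bits instead of taking a modulo. A minimal userspace sketch of that lookup follows; the hash_ptr() below is a hypothetical stand-in for the kernel helper, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's hash_ptr(): multiply by a 64-bit
 * golden-ratio constant and keep the top `bits` bits, giving an
 * index in [0, 1 << bits). */
static unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	uint64_t val = (uintptr_t)ptr * 0x9e3779b97f4a7c15ULL;

	return (unsigned long)(val >> (64 - bits));
}

int main(void)
{
	unsigned long wait_table_bits = 8;                 /* example value */
	unsigned long nr_entries = 1UL << wait_table_bits; /* the renamed field */
	int page;                                          /* stands in for a struct page */

	/* Any page address maps to one of the nr_entries hashed waitqueues. */
	printf("page %p -> waitqueue %lu of %lu\n",
	       (void *)&page, hash_ptr(&page, wait_table_bits), nr_entries);
	return 0;
}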
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1727,7 +1727,7 @@ void __init build_all_zonelists(void)
  */
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 {
 	unsigned long size = 1;
@@ -2019,13 +2019,15 @@ void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	 * The per-page waitqueue mechanism uses hashed waitqueues
 	 * per zone.
 	 */
-	zone->wait_table_size = wait_table_size(zone_size_pages);
-	zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
+	zone->wait_table_hash_nr_entries =
+		 wait_table_hash_nr_entries(zone_size_pages);
+	zone->wait_table_bits =
+		 wait_table_bits(zone->wait_table_hash_nr_entries);
 	zone->wait_table = (wait_queue_head_t *)
-		alloc_bootmem_node(pgdat, zone->wait_table_size
+		alloc_bootmem_node(pgdat, zone->wait_table_hash_nr_entries
 					* sizeof(wait_queue_head_t));
 
-	for(i = 0; i < zone->wait_table_size; ++i)
+	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
 }
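The body of the renamed helper is collapsed in the hunk above; only its opening `unsigned long size = 1;` is visible. A hedged sketch of the sizing it suggests: one waitqueue per PAGES_PER_WAITQUEUE pages, rounded up to a power of two so that wait_table_bits stays well defined. The 4..4096 clamps are assumptions, not taken from this diff.

#include <stdio.h>

#define PAGES_PER_WAITQUEUE	256

/* Sketch of the renamed helper: one waitqueue per PAGES_PER_WAITQUEUE
 * pages, rounded up to a power of two.  The clamps below are assumed. */
static unsigned long wait_table_hash_nr_entries(unsigned long pages)
{
	unsigned long size = 1;

	pages /= PAGES_PER_WAITQUEUE;

	while (size < pages)
		size <<= 1;

	if (size > 4096)	/* assumed upper bound on table size */
		size = 4096;
	if (size < 4)		/* assumed lower bound */
		size = 4;

	return size;
}

int main(void)
{
	/* A 1 GiB zone of 4 KiB pages: 262144 pages -> 1024 entries. */
	printf("%lu\n", wait_table_hash_nr_entries(262144));
	return 0;
}

Rounding up to a power of two is what keeps the (1 << wait_table_bits) relationship from the mmzone.h comment true for whatever value zone_wait_table_init() computes.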