Commit bc8f63af authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents f66525a4 ad2ac7d8
@@ -2724,8 +2724,8 @@ sys32_open (const char * filename, int flags, int mode)
 struct epoll_event32
 {
         u32 events;
-        u64 data;
-} __attribute__((packed));
+        u32 data[2];
+};

 asmlinkage long
 sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
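Annotation: the struct change drops the packed 64-bit member in favor of two explicit 32-bit halves. The i386 layout of epoll_event puts the 64-bit payload at offset 4, so on ia64 a `u64` member only works with `__attribute__((packed))` and invites unaligned accesses; `u32 data[2]` keeps the same 12-byte layout and makes the halfword copies below natural. A standalone sketch (plain user-space C, not part of the commit) of how the 64-bit payload is reassembled from the two halves:

#include <stdint.h>
#include <stdio.h>

/* Reassemble a 64-bit epoll payload from the two 32-bit halves a
 * 32-bit client stored, low half first, as in the compat code. */
int main(void)
{
        uint32_t data[2] = { 0xdeadbeef, 0x000000c0 };  /* low, high */
        uint64_t data64;

        data64  = data[0];                    /* low 32 bits  */
        data64 |= (uint64_t)data[1] << 32;    /* high 32 bits */
        printf("data64 = %#llx\n", (unsigned long long)data64);
        return 0;
}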
@@ -2740,10 +2740,10 @@ sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
                 return error;

         __get_user(event64.events, &event->events);
-        __get_user(data_halfword, (u32*)(&event->data));
+        __get_user(data_halfword, &event->data[0]);
         event64.data = data_halfword;
-        __get_user(data_halfword, ((u32*)(&event->data) + 1));
-        event64.data |= ((u64)data_halfword) << 32;
+        __get_user(data_halfword, &event->data[1]);
+        event64.data |= (u64)data_halfword << 32;

         set_fs(KERNEL_DS);
         error = sys_epoll_ctl(epfd, op, fd, &event64);
@@ -2758,8 +2758,9 @@ sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
 {
         struct epoll_event *events64 = NULL;
         mm_segment_t old_fs = get_fs();
-        int error;
+        int error, numevents, size;
         int evt_idx;
+        int do_free_pages = 0;

         if (maxevents <= 0) {
                 return -EINVAL;
@@ -2770,43 +2771,44 @@ sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
                            maxevents * sizeof(struct epoll_event32))))
                 return error;

-        /* Allocate the space needed for the intermediate copy */
-        events64 = kmalloc(maxevents * sizeof(struct epoll_event), GFP_KERNEL);
+        /*
+         * Allocate space for the intermediate copy.  If the space needed
+         * is large enough to cause kmalloc to fail, then try again with
+         * __get_free_pages.
+         */
+        size = maxevents * sizeof(struct epoll_event);
+        events64 = kmalloc(size, GFP_KERNEL);
         if (events64 == NULL) {
-                return -ENOMEM;
-        }
-
-        /* Expand the 32-bit structures into the 64-bit structures */
-        for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
-                u32 data_halfword;
-                __get_user(events64[evt_idx].events, &events[evt_idx].events);
-                __get_user(data_halfword, (u32*)(&events[evt_idx].data));
-                events64[evt_idx].data = data_halfword;
-                __get_user(data_halfword, ((u32*)(&events[evt_idx].data) + 1));
-                events64[evt_idx].data |= ((u64)data_halfword) << 32;
+                events64 = __get_free_pages(GFP_KERNEL, get_order(size));
+                if (events64 == NULL)
+                        return -ENOMEM;
+                do_free_pages = 1;
         }

         /* Do the system call */
         set_fs(KERNEL_DS);      /* copy_to/from_user should work on kernel mem*/
-        error = sys_epoll_wait(epfd, events64, maxevents, timeout);
+        numevents = sys_epoll_wait(epfd, events64, maxevents, timeout);
         set_fs(old_fs);

         /* Don't modify userspace memory if we're returning an error */
-        if (!error) {
+        if (numevents > 0) {
                 /* Translate the 64-bit structures back into the 32-bit
                    structures */
-                for (evt_idx = 0; evt_idx < maxevents; evt_idx++) {
+                for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
                         __put_user(events64[evt_idx].events,
                                    &events[evt_idx].events);
-                        __put_user((u32)(events64[evt_idx].data),
-                                   (u32*)(&events[evt_idx].data));
+                        __put_user((u32)events64[evt_idx].data,
+                                   &events[evt_idx].data[0]);
                         __put_user((u32)(events64[evt_idx].data >> 32),
-                                   ((u32*)(&events[evt_idx].data) + 1));
+                                   &events[evt_idx].data[1]);
                 }
         }

-        kfree(events64);
-        return error;
+        if (do_free_pages)
+                free_pages(events64, get_order(size));
+        else
+                kfree(events64);
+        return numevents;
 }

 #ifdef NOTYET   /* UNTESTED FOR IA64 FROM HERE DOWN */
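Annotation: the new fallback path also fixes the error handling — sys_epoll_wait() returns the number of ready events, so the copy-back now only runs for a positive count and only translates `numevents` entries. One wrinkle the hunk leaves: `__get_free_pages()` returns an `unsigned long` and `free_pages()` takes one, while `events64` is a `struct epoll_event *`, so strictly both sites want casts. A hedged sketch of the same logic with the casts spelled out (same names as above, not part of the commit):

        size = maxevents * sizeof(struct epoll_event);
        events64 = kmalloc(size, GFP_KERNEL);
        if (events64 == NULL) {
                /* kmalloc can't satisfy a large maxevents; fall back
                 * to grabbing whole pages directly. */
                events64 = (struct epoll_event *)
                        __get_free_pages(GFP_KERNEL, get_order(size));
                if (events64 == NULL)
                        return -ENOMEM;
                do_free_pages = 1;      /* remember how to free it */
        }

        /* ... syscall and copy-back as in the hunk above ... */

        if (do_free_pages)
                free_pages((unsigned long) events64, get_order(size));
        else
                kfree(events64);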
...
@@ -454,6 +454,12 @@ acpi_numa_arch_fixup (void)
 {
         int i, j, node_from, node_to;

+        /* If there's no SRAT, fix the phys_id */
+        if (srat_num_cpus == 0) {
+                node_cpuid[0].phys_id = hard_smp_processor_id();
+                return;
+        }
+
         /* calculate total number of nodes in system from PXM bitmap */
         numnodes = 0;           /* init total nodes in system */
@@ -614,6 +620,12 @@ acpi_boot_init (void)
         smp_build_cpu_map();
 # ifdef CONFIG_NUMA
+        if (srat_num_cpus == 0) {
+                int cpu, i = 1;
+
+                for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
+                        if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
+                                node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
+        }
         build_cpu_to_node_map();
 # endif
 #endif
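Annotation: together these two additions synthesize a minimal CPU-to-node mapping on machines whose ACPI tables lack an SRAT: the boot CPU's physical ID is placed in slot 0, and every other CPU discovered in smp_boot_data is appended after it. A standalone model of that filling logic (plain C with hypothetical sample IDs; 7 stands in for what hard_smp_processor_id() would return):

#include <stdio.h>

int main(void)
{
        int cpu_phys_id[] = { 7, 3, 5, 9 };     /* hypothetical boot data */
        int cpu_count = 4, boot_id = 7;
        int node_cpuid[4], cpu, i = 1;

        node_cpuid[0] = boot_id;        /* slot 0: the boot CPU */
        for (cpu = 0; cpu < cpu_count; cpu++)
                if (cpu_phys_id[cpu] != boot_id)
                        node_cpuid[i++] = cpu_phys_id[cpu];

        for (cpu = 0; cpu < i; cpu++)
                printf("slot %d -> phys_id %d\n", cpu, node_cpuid[cpu]);
        return 0;
}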
...
@@ -20,13 +20,46 @@
 #define TASK_HPAGE_BASE (REGION_HPAGE << REGION_SHIFT)

 static long htlbpagemem;
 int htlbpage_max;
 static long htlbzone_pages;

-static LIST_HEAD(htlbpage_freelist);
+static struct list_head hugepage_freelists[MAX_NUMNODES];
 static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;

+static void enqueue_huge_page(struct page *page)
+{
+        list_add(&page->list,
+                 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+}
+
+static struct page *dequeue_huge_page(void)
+{
+        int nid = numa_node_id();
+        struct page *page = NULL;
+
+        if (list_empty(&hugepage_freelists[nid])) {
+                for (nid = 0; nid < MAX_NUMNODES; ++nid)
+                        if (!list_empty(&hugepage_freelists[nid]))
+                                break;
+        }
+        if (nid >= 0 && nid < MAX_NUMNODES &&
+            !list_empty(&hugepage_freelists[nid])) {
+                page = list_entry(hugepage_freelists[nid].next, struct page, list);
+                list_del(&page->list);
+        }
+        return page;
+}
+
+static struct page *alloc_fresh_huge_page(void)
+{
+        static int nid = 0;
+        struct page *page;
+
+        page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
+        nid = (nid + 1) % numnodes;
+        return page;
+}
+
 void free_huge_page(struct page *page);
 static struct page *alloc_hugetlb_page(void)
@@ -35,13 +68,11 @@ static struct page *alloc_hugetlb_page(void)
         struct page *page;

         spin_lock(&htlbpage_lock);
-        if (list_empty(&htlbpage_freelist)) {
+        page = dequeue_huge_page();
+        if (!page) {
                 spin_unlock(&htlbpage_lock);
                 return NULL;
         }
-        page = list_entry(htlbpage_freelist.next, struct page, list);
-        list_del(&page->list);
         htlbpagemem--;
         spin_unlock(&htlbpage_lock);
         set_page_count(page, 1);
@@ -228,7 +259,7 @@ void free_huge_page(struct page *page)
         INIT_LIST_HEAD(&page->list);

         spin_lock(&htlbpage_lock);
-        list_add(&page->list, &htlbpage_freelist);
+        enqueue_huge_page(page);
         htlbpagemem++;
         spin_unlock(&htlbpage_lock);
 }
@@ -371,7 +402,7 @@ int try_to_free_low(int count)
         map = NULL;
         spin_lock(&htlbpage_lock);
-        list_for_each(p, &htlbpage_freelist) {
+        list_for_each(p, &hugepage_freelists[0]) {
                 if (map) {
                         list_del(&map->list);
                         update_and_free_page(map);
@@ -408,11 +439,11 @@ int set_hugetlb_mem_size(int count)
                 return (int)htlbzone_pages;
         if (lcount > 0) {       /* Increase the mem size. */
                 while (lcount--) {
-                        page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+                        page = alloc_fresh_huge_page();
                         if (page == NULL)
                                 break;
                         spin_lock(&htlbpage_lock);
-                        list_add(&page->list, &htlbpage_freelist);
+                        enqueue_huge_page(page);
                         htlbpagemem++;
                         htlbzone_pages++;
                         spin_unlock(&htlbpage_lock);
@@ -449,17 +480,18 @@ __setup("hugepages=", hugetlb_setup);
 static int __init hugetlb_init(void)
 {
-        int i, j;
+        int i;
         struct page *page;

+        for (i = 0; i < MAX_NUMNODES; ++i)
+                INIT_LIST_HEAD(&hugepage_freelists[i]);
+
         for (i = 0; i < htlbpage_max; ++i) {
-                page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
+                page = alloc_fresh_huge_page();
                 if (!page)
                         break;
-                for (j = 0; j < HPAGE_SIZE/PAGE_SIZE; ++j)
-                        SetPageReserved(&page[j]);
                 spin_lock(&htlbpage_lock);
-                list_add(&page->list, &htlbpage_freelist);
+                enqueue_huge_page(page);
                 spin_unlock(&htlbpage_lock);
         }
         htlbpage_max = htlbpagemem = htlbzone_pages = i;
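Annotation: taken together, the hugetlbpage.c hunks make the huge-page pool NUMA-aware: freed pages are queued on their home node's list, dequeue prefers the caller's local node and falls back to the first node holding a free page, and alloc_fresh_huge_page() round-robins fresh allocations across nodes (note that try_to_free_low() still scans only hugepage_freelists[0]). A runnable user-space model of the dequeue policy, with a per-node free count standing in for the freelists:

#include <stdio.h>

#define MAX_NUMNODES 4

/* Model of dequeue_huge_page()'s node choice: prefer the local node,
 * else fall back to the first node with a free huge page. */
static int free_count[MAX_NUMNODES] = { 0, 0, 3, 1 };

static int pick_node(int local)
{
        int nid = local;

        if (!free_count[nid])
                for (nid = 0; nid < MAX_NUMNODES; ++nid)
                        if (free_count[nid])
                                break;
        return (nid < MAX_NUMNODES && free_count[nid]) ? nid : -1;
}

int main(void)
{
        printf("local node 2 -> node %d\n", pick_node(2));  /* 2: has pages */
        printf("local node 0 -> node %d\n", pick_node(0));  /* 2: first with pages */
        return 0;
}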
...
@@ -867,6 +867,9 @@ sn_pci_init (void)
         int i = 0;
         struct pci_controller *controller;

+        if (!ia64_platform_is("sn2"))
+                return 0;
+
         /*
          * set pci_raw_ops, etc.
          */
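Annotation: the early return lets a generic ia64 kernel carry this code while booting on non-SN2 hardware — the routine still runs everywhere, but only performs its setup on sn2. A minimal sketch of the guard pattern, assuming (hypothetically) the routine is wired up as an initcall:

static int __init sn_pci_init(void)
{
        /* Generic kernels run every platform's init code; bail out
         * immediately when this is not an SN2 machine. */
        if (!ia64_platform_is("sn2"))
                return 0;

        /* SN2-specific PCI setup happens only past this point. */
        return 0;
}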
...
@@ -147,7 +147,6 @@ char drive_info[4*16];
  * Sets up an initial console to aid debugging.  Intended primarily
  * for bringup.  See start_kernel() in init/main.c.
  */
-#if defined(CONFIG_IA64_EARLY_PRINTK_SGI_SN) || defined(CONFIG_IA64_SGI_SN_SIM)
 void __init
 early_sn_setup(void)
@@ -189,7 +188,6 @@ early_sn_setup(void)
                 printk(KERN_DEBUG "early_sn_setup: setting master_node_bedrock_address to 0x%lx\n", master_node_bedrock_address);
         }
 }
-#endif /* CONFIG_IA64_EARLY_PRINTK_SGI_SN */
#ifdef CONFIG_IA64_MCA #ifdef CONFIG_IA64_MCA
extern int platform_intr_list[]; extern int platform_intr_list[];
...
@@ -128,7 +128,7 @@ typedef struct irqpda_s irqpda_t;
  * Check if given a compact node id the corresponding node has all the
  * cpus disabled.
  */
-#define is_headless_node(cnode)         (!any_online_cpu(node_to_cpumask(cnode)))
+#define is_headless_node(cnode)         (!node_to_cpu_mask[cnode])

 /*
  * Check if given a node vertex handle the corresponding node has all the
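Annotation: the macro now tests the per-node CPU mask directly instead of building a cpumask and searching it for an online CPU — a node is headless when no bits are set in its node_to_cpu_mask entry. A standalone model, assuming (hypothetically) one 64-bit mask per node:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-node CPU masks: node 0
 * owns CPUs 0-1, node 1 has no CPUs at all. */
static uint64_t node_to_cpu_mask[2] = { 0x3, 0x0 };

#define is_headless_node(cnode) (!node_to_cpu_mask[cnode])

int main(void)
{
        printf("node 0 headless? %d\n", is_headless_node(0));  /* 0 */
        printf("node 1 headless? %d\n", is_headless_node(1));  /* 1 */
        return 0;
}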
...