Commit 22f60da7 authored by David Mosberger

ia64: TLB flushing fixes and reserve large-page syscall numbers.

parent b2cad830
@@ -49,6 +49,7 @@ wrap_mmu_context (struct mm_struct *mm)
 {
 	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
 	struct task_struct *tsk;
+	int i;
 
 	if (ia64_ctx.next > max_ctx)
 		ia64_ctx.next = 300;	/* skip daemons */
@@ -77,7 +78,11 @@ wrap_mmu_context (struct mm_struct *mm)
 			ia64_ctx.limit = tsk_context;
 	}
 	read_unlock(&tasklist_lock);
-	flush_tlb_all();
+	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
+	for (i = 0; i < smp_num_cpus; ++i)
+		if (i != smp_processor_id())
+			per_cpu(ia64_need_tlb_flush, i) = 1;
+	__flush_tlb_all();
 }
 
 void
...
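The hunk above replaces the cross-CPU flush_tlb_all() with a deferred scheme: the CPU that wraps the context counter sets ia64_need_tlb_flush on every other CPU and purges only its own TLB; each remote CPU purges lazily, the next time it hands out a context (see delayed_tlb_flush() in the mmu_context.h change below). A minimal user-space model of that handshake, with a plain array standing in for the per-CPU variable and printf standing in for the purge; all names here are illustrative, not kernel identifiers:

#include <stdio.h>

#define NCPUS 4

static unsigned char need_flush[NCPUS];	/* stand-in for per-CPU ia64_need_tlb_flush */

/* What the wrapping CPU does: flag everyone else, purge only itself. */
static void wrap_contexts(int self)
{
	int i;

	for (i = 0; i < NCPUS; ++i)
		if (i != self)
			need_flush[i] = 1;
	printf("cpu%d: local TLB purge at wrap time\n", self);
}

/* What every CPU does before installing a (possibly reused) context number. */
static void deferred_flush(int self)
{
	if (need_flush[self]) {
		printf("cpu%d: deferred TLB purge\n", self);
		need_flush[self] = 0;
	}
}

int main(void)
{
	wrap_contexts(0);	/* CPU 0 wraps the context counter        */
	deferred_flush(2);	/* CPU 2 flushes lazily, on first use only */
	deferred_flush(2);	/* nothing left to do on the second call   */
	return 0;
}

The ordering is the point of the toy: each CPU clears its flag only after purging, and it checks the flag before any newly allocated context number is installed.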
@@ -2,8 +2,8 @@
 #define _ASM_IA64_MMU_CONTEXT_H
 
 /*
- * Copyright (C) 1998-2001 Hewlett-Packard Co
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 /*
@@ -13,8 +13,6 @@
  * consider the region number when performing a TLB lookup, we need to assign a unique
  * region id to each region in a process. We use the least significant three bits in a
  * region id for this purpose.
- *
- * Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
 #define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -23,6 +21,8 @@
 # ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
+#include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
@@ -36,6 +36,7 @@ struct ia64_ctx {
 };
 
 extern struct ia64_ctx ia64_ctx;
+extern u8 ia64_need_tlb_flush __per_cpu_data;
 
 extern void wrap_mmu_context (struct mm_struct *mm);
@@ -44,9 +45,28 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
 }
 
+/*
+ * When the context counter wraps around all TLBs need to be flushed because an old
+ * context number might have been reused. This is signalled by the ia64_need_tlb_flush
+ * per-CPU variable, which is checked in the routine below. Called by activate_mm().
+ * <efocht@ess.nec.de>
+ */
+static inline void
+delayed_tlb_flush (void)
+{
+	extern void __flush_tlb_all (void);
+
+	if (unlikely(ia64_need_tlb_flush)) {
+		__flush_tlb_all();
+		ia64_need_tlb_flush = 0;
+	}
+}
+
 static inline void
 get_new_mmu_context (struct mm_struct *mm)
 {
+	delayed_tlb_flush();
+
 	spin_lock(&ia64_ctx.lock);
 	{
 		if (ia64_ctx.next >= ia64_ctx.limit)
@@ -54,7 +74,6 @@ get_new_mmu_context (struct mm_struct *mm)
 		mm->context = ia64_ctx.next++;
 	}
 	spin_unlock(&ia64_ctx.lock);
-
 }
 
 static inline void
...
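The header comment kept above says that each of a process' eight regions gets its own region id, encoded in the least significant three bits. A rough sketch of that encoding follows; the exact shift/OR and the further packing into the region registers are assumptions for illustration, not a quote of the kernel's actual rid construction:

#include <stdio.h>

/*
 * Illustrative only: build a region id from a context number and a region
 * number (0..7) by reserving the low three bits for the region, as the
 * header comment describes.
 */
static unsigned long make_rid(unsigned long context, unsigned int region)
{
	return (context << 3) | (region & 7);
}

int main(void)
{
	unsigned long context = 300;	/* first non-daemon context, as in tlb.c */
	unsigned int rgn;

	for (rgn = 0; rgn < 8; ++rgn)
		printf("region %u -> rid 0x%lx\n", rgn, make_rid(context, rgn));
	return 0;
}

In such a scheme one context value stands behind up to eight rids, which is why reusing a context number after a wrap has to be preceded by the TLB purges arranged above.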
@@ -72,12 +72,15 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
 {
 	unsigned long nr;
 
-	if (end - start >= 1024*1024*1024*1024UL) {
+	if (unlikely (end - start >= 1024*1024*1024*1024UL
+		      || rgn_index(start) != rgn_index(end - 1)))
+	{
 		/*
-		 * If we flush more than a tera-byte, we're probably better off just
-		 * flushing the entire address space.
+		 * If we flush more than a tera-byte or across regions, we're probably
+		 * better off just flushing the entire TLB(s). This should be very rare
+		 * and is not worth optimizing for.
 		 */
-		flush_tlb_mm(tlb->mm);
+		flush_tlb_all();
 	} else {
 		/*
 		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
...
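The new condition also punts to flush_tlb_all() when start and end - 1 land in different regions. On ia64 the region of a virtual address is selected by its uppermost three bits (eight regions in all), so a range that straddles a region boundary spans more than one region id and is not handled by the ranged purge path; flushing everything is the simple fallback. A sketch of the check, assuming rgn_index() simply extracts those top bits (the real definition lives elsewhere in the ia64 headers):

#include <stdio.h>

/* Assumed definition for illustration: region = top 3 bits of the 64-bit VA. */
#define RGN_SHIFT	61
#define rgn_index(va)	((unsigned long)(va) >> RGN_SHIFT)

int main(void)
{
	unsigned long start = 0x2000000000000000UL;	/* region 1 */
	unsigned long end   = 0x4000000000000100UL;	/* region 2 */

	if (rgn_index(start) != rgn_index(end - 1))
		printf("range crosses a region boundary: flush everything\n");
	else
		printf("range stays inside one region: ranged flush is fine\n");
	return 0;
}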
@@ -223,6 +223,10 @@
 #define __NR_sched_setaffinity		1231
 #define __NR_sched_getaffinity		1232
 #define __NR_security			1233
+#define __NR_get_large_pages		1234
+#define __NR_free_large_pages		1235
+#define __NR_share_large_pages		1236
+#define __NR_unshare_large_pages	1237
 
 #if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
...
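The four numbers above are only reserved; no implementation behind them is added by this commit. Until one exists, invoking them from user space through the generic syscall(2) wrapper should simply fail with ENOSYS, along these lines (1234 is taken from the define above; the arguments are placeholders since the interface itself is not defined here):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* 1234 == __NR_get_large_pages as reserved above; on a kernel without
	   an implementation the call is expected to return -1 with ENOSYS. */
	long ret = syscall(1234, 0, 0, 0);

	if (ret == -1)
		printf("get_large_pages: %s\n", strerror(errno));
	return 0;
}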