Commit 2cc6e0e0 authored by David Mosberger's avatar David Mosberger

ia64: Make flush_tlb_mm() work for multi-threaded address-spaces on SMP machines.

parent eb4e9b51
......@@ -210,6 +210,18 @@ smp_flush_tlb_all (void)
local_flush_tlb_all();
}
/*
 * Flush the TLB mappings of a (possibly multi-threaded) address space
 * on every CPU that might be caching them.
 */
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	int single_user;

	local_flush_tlb_mm(mm);

	/*
	 * Fast path: for the common single-threaded fork() case, no other
	 * CPU can hold stale mappings for this mm, so the local flush above
	 * is sufficient.
	 */
	single_user = (mm == current->active_mm && atomic_read(&mm->mm_users) == 1);
	if (!likely(single_user))
		smp_call_function((void (*)(void *))local_flush_tlb_mm, mm, 1, 1);
}
/*
* Run a function on another CPU
* <func> The function to run. This must be fast and non-blocking.
......
......@@ -26,25 +26,42 @@ extern void local_flush_tlb_all (void);
#ifdef CONFIG_SMP
extern void smp_flush_tlb_all (void);
extern void smp_flush_tlb_mm (struct mm_struct *mm);
/* On SMP, a full TLB flush must be broadcast to all CPUs. */
# define flush_tlb_all() smp_flush_tlb_all()
#else
/* On UP, flushing the local TLB is all there is to do. */
# define flush_tlb_all() local_flush_tlb_all()
#endif
/*
 * Flush this CPU's TLB entries for MM by switching it to a fresh
 * mmu context.  Only the currently-active address space needs this;
 * an inactive mm will pick up a new context when it is next activated.
 */
static inline void
local_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm != current->active_mm)
		return;
	get_new_mmu_context(mm);
	reload_context(mm);
}
/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	/* Invalidate the mm's context; any later activation gets a fresh one. */
	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	/* Other CPUs may cache entries for a multi-threaded mm: flush everywhere. */
	smp_flush_tlb_mm(mm);
#else
	local_flush_tlb_mm(mm);
#endif
}
extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment