Commit c4fe760e authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/mmu_context_64.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 55464da9
@@ -20,12 +20,12 @@ void destroy_context(struct mm_struct *mm);
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		write_pda(mmu_state, TLBSTATE_LAZY);
 #endif
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
@@ -39,7 +39,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
 
 		if (unlikely(next->context.ldt != prev->context.ldt))
 			load_LDT_nolock(&next->context);
 	}
 #ifdef CONFIG_SMP
@@ -48,7 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		if (read_pda(active_mm) != next)
 			BUG();
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
 			 */
@@ -59,13 +59,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #endif
 }
 
-#define deactivate_mm(tsk,mm) do { \
+#define deactivate_mm(tsk, mm) \
+do { \
 	load_gs_index(0); \
 	asm volatile("movl %0,%%fs"::"r"(0)); \
-} while(0)
+} while (0)
 
 #define activate_mm(prev, next) \
-	switch_mm((prev),(next),NULL)
+	switch_mm((prev), (next), NULL)
 
 #endif
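
The first three hunks appear to touch only whitespace (both columns of the original side-by-side view read the same), but the code they pass through is the interesting part of this file: enter_lazy_tlb() downgrades the CPU's per-CPU mmu_state from TLBSTATE_OK to TLBSTATE_LAZY instead of flushing, and switch_mm() later reloads CR3 if flush IPIs were withheld while the CPU sat in lazy mode. Below is a minimal user-space model of that state machine; the TLBSTATE enum names mirror the diff, while cpu_pda, enter_lazy_tlb_model() and resume_mm_model() are made-up stand-ins for the kernel's pda accessors and the real IPI/CR3 machinery.

/*
 * Minimal model of the lazy-TLB state machine from the hunks above.
 * Names with _model suffixes and struct cpu_pda are illustrative
 * only; the real kernel uses read_pda()/write_pda() and per-CPU data.
 */
#include <stdio.h>

enum tlb_state { TLBSTATE_OK, TLBSTATE_LAZY };

struct cpu_pda {
	enum tlb_state mmu_state;
};

static struct cpu_pda pda;	/* one CPU's worth of state */

/* A kernel thread takes over: don't flush, just mark the TLB lazy. */
static void enter_lazy_tlb_model(void)
{
	if (pda.mmu_state == TLBSTATE_OK)
		pda.mmu_state = TLBSTATE_LAZY;
}

/*
 * Switching back to a real mm: leave lazy mode. The real switch_mm()
 * additionally reloads CR3 when this CPU had been dropped from
 * mm->cpu_vm_mask, because leave_mm disabled flush IPI delivery and
 * the page tables may have been freed in the meantime.
 */
static void resume_mm_model(void)
{
	pda.mmu_state = TLBSTATE_OK;
}

int main(void)
{
	enter_lazy_tlb_model();
	printf("state after enter_lazy_tlb: %s\n",
	       pda.mmu_state == TLBSTATE_LAZY ? "LAZY" : "OK");
	resume_mm_model();
	printf("state after switch back:    %s\n",
	       pda.mmu_state == TLBSTATE_OK ? "OK" : "LAZY");
	return 0;
}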
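The one structural change is to deactivate_mm: checkpatch wants a space after the comma in the parameter list, "while (0)" rather than "while(0)", and the do on its own line. The do { ... } while (0) wrapper this cleans up is the standard idiom for multi-statement macros: it turns the body into a single statement that still requires a trailing semicolon, so the macro nests safely under if/else. A small stand-alone illustration, using a made-up FROB() macro rather than the kernel code:

/*
 * Why multi-statement macros are wrapped in do { ... } while (0).
 * FROB() and FROB_UNSAFE() are invented examples, not kernel code.
 */
#include <stdio.h>

/* Unsafe: two statements with no wrapper. */
#define FROB_UNSAFE(x)	printf("frob %d\n", (x)); (x)++

/* Safe: the wrapper makes the body one statement that still
 * demands a trailing semicolon at the call site. */
#define FROB(x)				\
do {					\
	printf("frob %d\n", (x));	\
	(x)++;				\
} while (0)

int main(void)
{
	int v = 1;

	if (v > 0)
		FROB(v);	/* fine: expands to a single statement */
	else
		printf("never\n");

	/*
	 * Substituting FROB_UNSAFE above would not compile: only the
	 * printf would be guarded by the if, and the dangling
	 * "(v)++;" before the else would be a syntax error.
	 */
	return 0;
}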