Commit 6dd01bed authored by Tejun Heo

x86: prepare for tlb merge

Impact: clean up, ipi vector number reordering for x86_32

Make the following changes to prepare for tlb merge.

* reorder x86_32 IPI vectors

* adjust tlb_32.c and tlb_64.c so that their logic matches exactly
	- on a spurious invalidate IPI, tlb_32 now acks the irq
	- tlb_64 now has proper memory barriers around clearing
	  flush_cpumask (no change in generated code); the ordering is
	  modeled in the sketch below the commit header

* unexport flush_tlb_page from tlb_32.c; it has no users

* use unsigned int for cpu id

* drop unnecessary includes from tlb_64.c
Signed-off-by: Tejun Heo <tj@kernel.org>
parent bdbcdd48
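
The memory-barrier item above is easiest to see with a concrete model. The program below is a minimal userspace sketch of the handshake, not kernel code: an initiator sets one bit per target CPU in a shared mask, then busy-waits until the mask is empty, while each target must make its flush visible before its cleared bit becomes visible, which is the job done in the patch by smp_mb__before_clear_bit()/smp_mb__after_clear_bit() around cpumask_clear_cpu(). The names flush_mask, target_cpu and NTARGETS are illustrative, and C11 release/acquire atomics stand in for the kernel primitives.

/* Minimal userspace model of the flush_cpumask handshake; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NTARGETS 4

static atomic_uint flush_mask;        /* bit n set => target n still pending */
static int tlb_flushed[NTARGETS];     /* stands in for the actual TLB flush  */

static void *target_cpu(void *arg)
{
	unsigned int cpu = (unsigned int)(uintptr_t)arg;

	/* wait until the initiator marks this cpu as pending */
	while (!(atomic_load_explicit(&flush_mask, memory_order_acquire) & (1u << cpu)))
		;

	tlb_flushed[cpu] = 1;         /* "flush the TLB" */

	/*
	 * Release ordering: the store above must become visible before the
	 * cleared bit does, mirroring the barriers added around
	 * cpumask_clear_cpu() in this patch.
	 */
	atomic_fetch_and_explicit(&flush_mask, ~(1u << cpu), memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTARGETS];
	unsigned int i;

	for (i = 0; i < NTARGETS; i++)
		pthread_create(&tid[i], NULL, target_cpu, (void *)(uintptr_t)i);

	/* initiator: mark every target pending, then wait for all clears */
	atomic_store_explicit(&flush_mask, (1u << NTARGETS) - 1, memory_order_release);
	while (atomic_load_explicit(&flush_mask, memory_order_acquire))
		;                     /* a real flusher would pause/relax here */

	for (i = 0; i < NTARGETS; i++) {
		pthread_join(tid[i], NULL);
		printf("cpu %u flushed=%d\n", i, tlb_flushed[i]);
	}
	return 0;
}

Without the release ordering on the clear, the initiator could observe the mask going empty while a target's flush is not yet globally visible, which is exactly the window the barriers added to tlb_64.c close.
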
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -49,31 +49,30 @@
  * some of the following vectors are 'rare', they are merged
  * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  * TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
  */
 #ifdef CONFIG_X86_32
 # define SPURIOUS_APIC_VECTOR           0xff
 # define ERROR_APIC_VECTOR              0xfe
-# define INVALIDATE_TLB_VECTOR          0xfd
-# define RESCHEDULE_VECTOR              0xfc
-# define CALL_FUNCTION_VECTOR           0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR    0xfa
-# define THERMAL_APIC_VECTOR            0xf0
+# define RESCHEDULE_VECTOR              0xfd
+# define CALL_FUNCTION_VECTOR           0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR    0xfb
+# define THERMAL_APIC_VECTOR            0xfa
+/* 0xf1 - 0xf9 : free */
+# define INVALIDATE_TLB_VECTOR          0xf0
 #else
-#define SPURIOUS_APIC_VECTOR            0xff
-#define ERROR_APIC_VECTOR               0xfe
-#define RESCHEDULE_VECTOR               0xfd
-#define CALL_FUNCTION_VECTOR            0xfc
-#define CALL_FUNCTION_SINGLE_VECTOR     0xfb
-#define THERMAL_APIC_VECTOR             0xfa
-#define THRESHOLD_APIC_VECTOR           0xf9
-#define UV_BAU_MESSAGE                  0xf8
-#define INVALIDATE_TLB_VECTOR_END       0xf7
-#define INVALIDATE_TLB_VECTOR_START     0xf0    /* f0-f7 used for TLB flush */
+# define SPURIOUS_APIC_VECTOR           0xff
+# define ERROR_APIC_VECTOR              0xfe
+# define RESCHEDULE_VECTOR              0xfd
+# define CALL_FUNCTION_VECTOR           0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR    0xfb
+# define THERMAL_APIC_VECTOR            0xfa
+# define THRESHOLD_APIC_VECTOR          0xf9
+# define UV_BAU_MESSAGE                 0xf8
+# define INVALIDATE_TLB_VECTOR_END      0xf7
+# define INVALIDATE_TLB_VECTOR_START    0xf0   /* f0-f7 used for TLB flush */
 
 #define NUM_INVALIDATE_TLB_VECTORS      8
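
For reference, the reordering moves INVALIDATE_TLB_VECTOR on x86_32 from 0xfd down to 0xf0, the same base the 64-bit table already uses for its block of eight flush vectors (INVALIDATE_TLB_VECTOR_START..INVALIDATE_TLB_VECTOR_END). That is what lets a merged implementation give each sender slot its own vector. The snippet below only illustrates that slot-to-vector mapping; tlb_flush_vector() is a hypothetical helper, not something added by this patch.

/* Illustrative mapping from a sender slot to its TLB-flush IPI vector. */
#include <stdio.h>

#define INVALIDATE_TLB_VECTOR_START	0xf0
#define NUM_INVALIDATE_TLB_VECTORS	8	/* vectors 0xf0 .. 0xf7 */

/* hypothetical helper: which vector a given sender slot would raise */
static unsigned int tlb_flush_vector(unsigned int sender_slot)
{
	return INVALIDATE_TLB_VECTOR_START +
	       (sender_slot % NUM_INVALIDATE_TLB_VECTORS);
}

int main(void)
{
	unsigned int slot;

	for (slot = 0; slot < NUM_INVALIDATE_TLB_VECTORS; slot++)
		printf("sender slot %u -> vector 0x%02x\n",
		       slot, tlb_flush_vector(slot));
	return 0;
}

Before the change, 0xf0 on 32-bit was taken by THERMAL_APIC_VECTOR; after it, the single 32-bit invalidate vector sits at the 64-bit block's base and 0xf1-0xf9 are left free, as the new comment in the hunk notes.
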
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -84,13 +84,15 @@ EXPORT_SYMBOL_GPL(leave_mm);
  *
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
  */
 
 void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	unsigned long cpu;
+	unsigned int cpu;
 
-	cpu = get_cpu();
+	cpu = smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, flush_cpumask))
 		goto out;
@@ -112,12 +114,11 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 		} else
 			leave_mm(cpu);
 	}
+out:
 	ack_APIC_irq();
 	smp_mb__before_clear_bit();
 	cpumask_clear_cpu(cpu, flush_cpumask);
 	smp_mb__after_clear_bit();
-out:
-	put_cpu_no_resched();
 	inc_irq_stat(irq_tlb_count);
 }
 
@@ -215,7 +216,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	flush_tlb_others(&mm->cpu_vm_mask, mm, va);
 	preempt_enable();
 }
-EXPORT_SYMBOL(flush_tlb_page);
 
 static void do_flush_tlb_all(void *info)
 {
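
Because the handler changes are split across two hunks, here is how the 32-bit smp_invalidate_interrupt() reads once both are applied. This is a reading aid assembled from the diff above, with the unchanged middle of the function elided, not a compilable excerpt. The two behavioural points: get_cpu()/put_cpu_no_resched() give way to smp_processor_id(), which is safe because interrupts are disabled in the handler, and the out: label now sits before ack_APIC_irq(), so a spurious invalidate IPI still gets acked, one of the items called out in the commit message.

/*
 * Reading aid only: the 32-bit handler as it looks after the hunks above
 * are applied, with the unchanged middle of the function elided.
 */
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;

	cpu = smp_processor_id();	/* irqs are off, so no migration */

	if (!cpumask_test_cpu(cpu, flush_cpumask))
		goto out;		/* spurious IPI */

	/* ... flush the TLB or leave_mm(cpu), unchanged by this patch ... */
out:
	ack_APIC_irq();			/* now acked on the spurious path too */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
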
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
 #include <linux/init.h>
 
 #include <linux/mm.h>
-#include <linux/delay.h>
 #include <linux/spinlock.h>
 #include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
+#include <asm/apic.h>
 #include <asm/uv/uv.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
@@ -121,8 +115,8 @@ EXPORT_SYMBOL_GPL(leave_mm);
 asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 {
-	int cpu;
-	int sender;
+	unsigned int cpu;
+	unsigned int sender;
 	union smp_flush_state *f;
 
 	cpu = smp_processor_id();
@@ -155,14 +149,16 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	}
 out:
 	ack_APIC_irq();
+	smp_mb__before_clear_bit();
 	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+	smp_mb__after_clear_bit();
 	inc_irq_stat(irq_tlb_count);
 }
 
 static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 				 struct mm_struct *mm, unsigned long va)
 {
-	int sender;
+	unsigned int sender;
 	union smp_flush_state *f;
 
 	/* Caller has disabled preemption */
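
The 64-bit hunks refer to a sender slot and a union smp_flush_state *f; that per-sender state, one slot per vector in the 0xf0-0xf7 block, is the structure the x86_32 reordering above makes room for. The sketch below is a self-contained userspace illustration of the layout only; struct flush_state, its fields, and pick_sender_slot() are assumptions for illustration (the real file keeps an array of union smp_flush_state indexed by a slot derived from the sending CPU), not code from this patch.

/*
 * Userspace illustration (not kernel code) of per-sender flush state:
 * one slot per TLB-flush vector, so several initiators can run
 * concurrently without sharing a single global cpumask.
 */
#include <stdio.h>

#define NUM_INVALIDATE_TLB_VECTORS	8
#define INVALIDATE_TLB_VECTOR_START	0xf0

struct flush_state {
	unsigned long flush_addr;	/* what to flush (illustrative field) */
	unsigned int  pending_mask;	/* CPUs that still have to flush      */
};

static struct flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/* illustrative slot choice: map the initiating CPU onto a slot */
static unsigned int pick_sender_slot(unsigned int cpu)
{
	return cpu % NUM_INVALIDATE_TLB_VECTORS;
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 16; cpu++) {
		unsigned int slot = pick_sender_slot(cpu);

		flush_state[slot].flush_addr = 0x1000ul * cpu;
		printf("cpu %2u -> slot %u (vector 0x%02x)\n",
		       cpu, slot, INVALIDATE_TLB_VECTOR_START + slot);
	}
	return 0;
}

With eight independent slots, up to eight CPUs can initiate flushes concurrently without contending on one global cpumask, which is the scalability advantage the 64-bit path brings into the merge; this patch's barrier and type changes make its handler tail read the same as the 32-bit one above.
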