#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

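/*
 * Per-CPU TLB state: the mm this CPU currently has loaded (active_mm)
 * and whether the CPU keeps its TLB up to date for that mm
 * (TLBSTATE_OK) or is running in lazy TLB mode (TLBSTATE_LAZY).
 */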
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

#include <mach_ipi.h>
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */

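/*
 * One flush_state slot per invalidate vector.  A sender fills in
 * flush_mm/flush_va and the mask of target CPUs in its slot, then
 * raises the vector that maps to that slot; the receivers use the
 * vector they were hit with to find the same slot again.
 */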
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_va;
		spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there
 * are no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a no-op
 * kept only for documentation purposes, and even that usage is slightly
 * inconsistent.  On x86_32, asmlinkage is regparm(0), but interrupt
 * entry calls in with the first parameter in %eax.  Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_ax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs,
		 * it's staying as a return.
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
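	/*
	 * Clearing our bit in flush_cpumask acknowledges the flush to the
	 * spinning sender; the barriers order the flush above before the
	 * clear, and the clear before whatever follows.
	 */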
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

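/*
 * Flush the TLBs of all CPUs in cpumask (except ourselves) for mm/va:
 * fill in this CPU's flush_state slot, send the matching invalidate
 * vector, and spin until every target has acknowledged by clearing its
 * bit in flush_cpumask.
 */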
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &flush_state[sender];

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpumask_andnot(to_cpumask(f->flush_cpumask),
		       cpumask, cpumask_of(smp_processor_id()));

	/*
	 * Make the above memory operations globally visible before
	 * sending the IPI.
	 */
	smp_mb();
	/* We have to send the IPI only to CPUs affected. */
	send_IPI_mask(to_cpumask(f->flush_cpumask),
		      INVALIDATE_TLB_VECTOR_START + sender);

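	/*
	 * Wait for every target to acknowledge before the slot is torn
	 * down and unlocked for reuse.
	 */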
	while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

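/*
 * On UV systems the broadcast assist hardware may perform the flush for
 * us; uv_flush_tlb_others() returns the CPUs (if any) that still need
 * the conventional IPI treatment.  Everyone else takes the IPI path
 * directly.
 */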
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = get_cpu();
		cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, va);
		put_cpu();
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, va);
}

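/* Initialize the spinlock protecting each flush_state slot. */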
static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		spin_lock_init(&flush_state[i].tlbstate_lock);

	return 0;
}
core_initcall(init_smp_flush);

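/*
 * Flush all TLB entries of the current task's mm, locally and on every
 * other CPU that has the mm loaded.
 */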
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

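/*
 * Flush all TLB entries for mm.  Locally we flush only if we are
 * actively using mm; a lazy TLB CPU leaves the mm instead.
 */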
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

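/*
 * Flush a single page, locally and on every other CPU that has the
 * vma's mm loaded.
 */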
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(&mm->cpu_vm_mask, mm, va);

	preempt_enable();
}

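/*
 * Runs on each CPU via on_each_cpu(): flush everything and, if this
 * CPU was in lazy TLB mode, leave the mm as well.
 */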
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

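/* Flush the entire TLB on every online CPU, including our own. */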
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}