Commit fa09aa0a authored by Anton Blanchard

ppc64: update ppc64 tlb batch code
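In short: the old per-CPU tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH] of struct tlb_batch_data pairs, walked with a ptes++ cursor, becomes a per-CPU struct ppc64_tlb_batch holding parallel pte[], addr[], and vaddr[] arrays indexed by a single counter, plus a persistent index so the batch can be filled incrementally by the mmu_gather hooks in the new <asm/tlb.h>. The vaddr[] member also replaces the on-stack va_array[MAX_BATCH_FLUSH] that pSeries_flush_hash_range used for its tlbiel/tlbie loops. Below is a minimal userspace sketch of the fill/drain pattern; only the ppc64_tlb_batch layout comes from this diff, while the helper names (batch_add, flush_batch, flush_one) and the simplified pte_t are hypothetical stand-ins:

/*
 * Illustrative sketch, not kernel code: entries accumulate in
 * per-CPU parallel arrays and are drained in one pass when the
 * batch fills or the page-table walk ends. flush_one() stands in
 * for flush_hash_page().
 */
#define PPC64_TLB_BATCH_NR 192

typedef unsigned long pte_t;	/* simplified for the sketch */

struct ppc64_tlb_batch {
	unsigned long index;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long addr[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
};

static void flush_one(unsigned long addr, pte_t pte)
{
	/* would invalidate one hash-table entry / TLB entry */
	(void)addr; (void)pte;
}

/* Drain the whole batch, then reset the index for reuse. */
static void flush_batch(struct ppc64_tlb_batch *batch)
{
	unsigned long i;

	for (i = 0; i < batch->index; i++)
		flush_one(batch->addr[i], batch->pte[i]);
	batch->index = 0;
}

/* Queue one entry; drain automatically when the arrays are full. */
static void batch_add(struct ppc64_tlb_batch *batch,
		      unsigned long addr, pte_t pte)
{
	batch->pte[batch->index] = pte;
	batch->addr[batch->index] = addr;
	if (++batch->index == PPC64_TLB_BATCH_NR)
		flush_batch(batch);
}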

parent 4bc583b4
@@ -46,6 +46,7 @@
 #include <asm/abs_addr.h>
 #include <asm/tlbflush.h>
 #include <asm/eeh.h>
+#include <asm/tlb.h>
 
 /*
  * Note: pte --> Linux PTE
@@ -415,12 +416,11 @@ void flush_hash_range(unsigned long context, unsigned long number, int local)
 		ppc_md.flush_hash_range(context, number, local);
 	} else {
 		int i;
-		struct tlb_batch_data *ptes =
-			&tlb_batch_array[smp_processor_id()][0];
+		struct ppc64_tlb_batch *batch =
+			&ppc64_tlb_batch[smp_processor_id()];
 
-		for (i = 0; i < number; i++) {
-			flush_hash_page(context, ptes->addr, ptes->pte, local);
-			ptes++;
-		}
+		for (i = 0; i < number; i++)
+			flush_hash_page(context, batch->addr[i], batch->pte[i],
+					local);
 	}
 }
@@ -20,6 +20,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
 /*
  * Create a pte. Used during initialization only.
@@ -372,31 +373,32 @@ static void pSeries_flush_hash_range(unsigned long context,
 {
 	unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
 	int i, j;
-	unsigned long va_array[MAX_BATCH_FLUSH];
 	HPTE *hptep;
 	Hpte_dword0 dw0;
-	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
 	/* XXX fix for large ptes */
 	unsigned long large = 0;
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((ptes->addr >= USER_START) && (ptes->addr <= USER_END))
-			vsid = get_vsid(context, ptes->addr);
+		if ((batch->addr[i] >= USER_START) &&
+		    (batch->addr[i] <= USER_END))
+			vsid = get_vsid(context, batch->addr[i]);
 		else
-			vsid = get_kernel_vsid(ptes->addr);
+			vsid = get_kernel_vsid(batch->addr[i]);
 
-		va = (vsid << 28) | (ptes->addr & 0x0fffffff);
-		va_array[j] = va;
+		va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
+		batch->vaddr[j] = va;
 		if (large)
 			vpn = va >> LARGE_PAGE_SHIFT;
 		else
 			vpn = va >> PAGE_SHIFT;
 		hash = hpt_hash(vpn, large);
-		secondary = (pte_val(ptes->pte) & _PAGE_SECONDARY) >> 15;
+		secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
 		if (secondary)
 			hash = ~hash;
 		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
-		slot += (pte_val(ptes->pte) & _PAGE_GROUP_IX) >> 12;
+		slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
 
 		hptep = htab_data.htab + slot;
@@ -405,8 +407,6 @@ static void pSeries_flush_hash_range(unsigned long context,
 
 		dw0 = hptep->dw0.dw0;
 
-		ptes++;
-
 		if ((dw0.avpn != avpn) || !dw0.v) {
 			pSeries_unlock_hpte(hptep);
 			udbg_printf("invalidate missed\n");
@@ -426,7 +426,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 			asm volatile("\n\
 				clrldi %0,%0,16\n\
 				tlbiel %0"
-				: : "r" (va_array[i]) : "memory" );
+				: : "r" (batch->vaddr[i]) : "memory" );
 		}
 		asm volatile("ptesync":::"memory");
@@ -440,7 +440,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 			asm volatile("\n\
 				clrldi %0,%0,16\n\
 				tlbie %0"
-				: : "r" (va_array[i]) : "memory" );
+				: : "r" (batch->vaddr[i]) : "memory" );
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -33,6 +33,7 @@
 #include <linux/pci.h>
 #include <asm/naca.h>
 #include <asm/tlbflush.h>
+#include <asm/tlb.h>
 
 /* Status return values */
 #define H_Success	0
@@ -775,15 +776,14 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
 			       int local)
 {
 	int i;
-	struct tlb_batch_data *ptes =
-		&tlb_batch_array[smp_processor_id()][0];
 	unsigned long flags;
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
 
 	spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
 
-	for (i = 0; i < number; i++) {
-		flush_hash_page(context, ptes->addr, ptes->pte, local);
-		ptes++;
-	}
+	for (i = 0; i < number; i++)
+		flush_hash_page(context, batch->addr[i], batch->pte[i], local);
 
 	spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }
@@ -295,7 +295,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	}
 }
 
-struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
+struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];
 
 void
 __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
@@ -305,81 +305,69 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 	pte_t *ptep;
 	pte_t pte;
 	unsigned long pgd_end, pmd_end;
-	unsigned long context;
-	int i = 0;
-	struct tlb_batch_data *ptes = &tlb_batch_array[smp_processor_id()][0];
+	unsigned long context = 0;
+	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[smp_processor_id()];
+	unsigned long i = 0;
 	int local = 0;
 
 	if ( start >= end )
 		panic("flush_tlb_range: start (%016lx) greater than end (%016lx)\n", start, end );
 
 	if ( REGION_ID(start) != REGION_ID(end) )
 		panic("flush_tlb_range: start (%016lx) and end (%016lx) not in same region\n", start, end );
 
-	context = 0;
-
-	switch( REGION_ID(start) ) {
+	switch(REGION_ID(start)) {
 	case VMALLOC_REGION_ID:
-		pgd = pgd_offset_k( start );
+		pgd = pgd_offset_k(start);
 		break;
 	case IO_REGION_ID:
-		pgd = pgd_offset_i( start );
+		pgd = pgd_offset_i(start);
 		break;
 	case USER_REGION_ID:
-		pgd = pgd_offset( mm, start );
+		pgd = pgd_offset(mm, start);
 		context = mm->context;
 
 		/* XXX are there races with checking cpu_vm_mask? - Anton */
-		if (mm->cpu_vm_mask == (1 << smp_processor_id())) {
+		if (mm->cpu_vm_mask == (1 << smp_processor_id()))
 			local = 1;
-		}
 
 		break;
 	default:
 		panic("flush_tlb_range: invalid region for start (%016lx) and end (%016lx)\n", start, end);
 	}
 
 	do {
 		pgd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
-		if ( pgd_end > end )
+		if (pgd_end > end)
 			pgd_end = end;
-		if ( !pgd_none( *pgd ) ) {
-			pmd = pmd_offset( pgd, start );
+		if (!pgd_none(*pgd)) {
+			pmd = pmd_offset(pgd, start);
 			do {
-				pmd_end = ( start + PMD_SIZE ) & PMD_MASK;
-				if ( pmd_end > end )
+				pmd_end = (start + PMD_SIZE) & PMD_MASK;
+				if (pmd_end > end)
 					pmd_end = end;
-				if ( !pmd_none( *pmd ) ) {
-					ptep = pte_offset_kernel( pmd, start );
+				if (!pmd_none(*pmd)) {
+					ptep = pte_offset_kernel(pmd, start);
 					do {
-						if ( pte_val(*ptep) & _PAGE_HASHPTE ) {
+						if (pte_val(*ptep) & _PAGE_HASHPTE) {
 							pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
-							if ( pte_val(pte) & _PAGE_HASHPTE ) {
-								ptes->pte = pte;
-								ptes->addr = start;
-								ptes++;
+							if (pte_val(pte) & _PAGE_HASHPTE) {
+								batch->pte[i] = pte;
+								batch->addr[i] = start;
 								i++;
-								if (i == MAX_BATCH_FLUSH) {
-									flush_hash_range(context, MAX_BATCH_FLUSH, local);
+								if (i == PPC64_TLB_BATCH_NR) {
+									flush_hash_range(context, i, local);
 									i = 0;
-									ptes = &tlb_batch_array[smp_processor_id()][0];
 								}
 							}
 						}
 						start += PAGE_SIZE;
 						++ptep;
-					} while ( start < pmd_end );
-				}
-				else
+					} while (start < pmd_end);
+				} else {
 					start = pmd_end;
+				}
 				++pmd;
-			} while ( start < pgd_end );
-		}
-		else
+			} while (start < pgd_end);
+		} else {
 			start = pgd_end;
+		}
 		++pgd;
-	} while ( start < end );
+	} while (start < end);
 
 	if (i)
 		flush_hash_range(context, i, local);
/*
 * TLB shootdown specifics for PPC64
 *
 * Copyright (C) 2002 Anton Blanchard, IBM Corp.
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _PPC64_TLB_H
#define _PPC64_TLB_H

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/mmu.h>

struct free_pte_ctx;
static inline void tlb_flush(struct free_pte_ctx *tlb);

/* Get the generic bits... */
#include <asm-generic/tlb.h>

/* Nothing needed here in fact... */
#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

/*
 * Should make this at least as large as the generic batch size,
 * but it takes up too much space.
 */
#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	unsigned long index;
	pte_t pte[PPC64_TLB_BATCH_NR];
	unsigned long addr[PPC64_TLB_BATCH_NR];
	unsigned long vaddr[PPC64_TLB_BATCH_NR];
};

extern struct ppc64_tlb_batch ppc64_tlb_batch[NR_CPUS];

static inline void tlb_remove_tlb_entry(mmu_gather_t *tlb, pte_t *ptep,
					unsigned long address)
{
	int cpu = smp_processor_id();
	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
	unsigned long i = batch->index;
	pte_t pte;

	if (pte_val(*ptep) & _PAGE_HASHPTE) {
		pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
		if (pte_val(pte) & _PAGE_HASHPTE) {
			int local = 0;

			if (tlb->mm->cpu_vm_mask == (1 << cpu))
				local = 1;

			batch->pte[i] = pte;
			batch->addr[i] = address;
			i++;
			if (i == PPC64_TLB_BATCH_NR) {
				flush_hash_range(tlb->mm->context, i, local);
				i = 0;
			}
		}
	}
	batch->index = i;
}

static inline void tlb_flush(struct free_pte_ctx *tlb)
{
	int cpu = smp_processor_id();
	struct ppc64_tlb_batch *batch = &ppc64_tlb_batch[cpu];
	int local = 0;

	if (tlb->mm->cpu_vm_mask == (1 << smp_processor_id()))
		local = 1;

	flush_hash_range(tlb->mm->context, batch->index, local);
	batch->index = 0;
}

#endif /* _PPC64_TLB_H */
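A note on how this new header slots in: <asm-generic/tlb.h> supplies the mmu_gather shootdown machinery and expects the architecture to provide tlb_remove_tlb_entry() (invoked once per PTE as the unmap path gathers pages) and tlb_flush() (invoked when the gather is finished). A hedged sketch of that calling sequence follows, written against the generic contract of this era rather than copied from any file; unmap_range_sketch is a hypothetical caller and the definitions above are assumed in scope:

/*
 * Roughly what the generic unmap path does while tearing down
 * a range of mappings.
 */
static void unmap_range_sketch(mmu_gather_t *tlb, pte_t *ptep,
			       unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE, ++ptep) {
		/* per-PTE hook above: clears _PAGE_HPTEFLAGS and
		 * queues the entry in this CPU's ppc64_tlb_batch */
		tlb_remove_tlb_entry(tlb, ptep, addr);
	}

	/* when the caller finishes the gather (tlb_finish_mmu), the
	 * generic code calls tlb_flush(), draining what remains */
	tlb_flush(tlb);
}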
@@ -35,12 +35,4 @@ extern void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 			    int local);
 void flush_hash_range(unsigned long context, unsigned long number, int local);
 
-/* TLB flush batching */
-#define MAX_BATCH_FLUSH 128
-struct tlb_batch_data {
-	pte_t pte;
-	unsigned long addr;
-};
-extern struct tlb_batch_data tlb_batch_array[NR_CPUS][MAX_BATCH_FLUSH];
-
 #endif /* _PPC64_TLBFLUSH_H */