Commit 4edf00a4 authored by Paul Burton, committed by Ralf Baechle

MIPS: Retrieve ASID masks using function accepting struct cpuinfo_mips

In preparation for supporting variable ASID masks, retrieve ASID masks
using functions in asm/cpu-info.h which accept struct cpuinfo_mips. This
will allow those functions to determine the ASID mask based upon the CPU
in a later patch. This also allows for the r3k & r8k cases to be handled
in Kconfig, which is arguably cleaner than the previous #ifdefs.
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13210/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent f1b711c6
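
For reference, the new Kconfig symbols encode exactly the per-CPU constants that the #ifdef block removed from asm/mmu_context.h below used to provide:

	CPU family      MIPS_ASID_SHIFT  MIPS_ASID_BITS  old ASID_INC  old ASID_MASK
	R3000/TX39xx    6                6               0x40          0xfc0
	R8000           4                8               0x10          0xff0
	all others      0                8               0x1           0xff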
arch/mips/Kconfig
@@ -2449,6 +2449,17 @@ config CPU_R4000_WORKAROUNDS
 config CPU_R4400_WORKAROUNDS
 	bool
 
+config MIPS_ASID_SHIFT
+	int
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 4 if CPU_R8000
+	default 0
+
+config MIPS_ASID_BITS
+	int
+	default 6 if CPU_R3000 || CPU_TX39XX
+	default 8
+
 #
 # - Highmem only makes sense for the 32-bit kernel.
 # - The current highmem code will only work properly on physically indexed
arch/mips/include/asm/cpu-info.h
@@ -132,4 +132,14 @@ struct proc_cpuinfo_notifier_args {
 # define cpu_vpe_id(cpuinfo)	({ (void)cpuinfo; 0; })
 #endif
 
+static inline unsigned long cpu_asid_inc(void)
+{
+	return 1 << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
+{
+	return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
+}
+
 #endif /* __ASM_CPU_INFO_H */
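
As a quick sanity check, a minimal user-space sketch (not part of the patch; the Kconfig symbols are hard-coded here with the R3000/TX39xx values, and the as-yet-unused cpuinfo argument is dropped) confirms the helpers reproduce the old constants:

	#include <assert.h>
	#include <stdio.h>

	/* Stand-ins for the Kconfig symbols, R3000/TX39xx values assumed. */
	#define CONFIG_MIPS_ASID_SHIFT	6
	#define CONFIG_MIPS_ASID_BITS	6

	static unsigned long cpu_asid_inc(void)
	{
		return 1 << CONFIG_MIPS_ASID_SHIFT;
	}

	static unsigned long cpu_asid_mask(void)
	{
		return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
	}

	int main(void)
	{
		assert(cpu_asid_inc() == 0x40);   /* old ASID_INC */
		assert(cpu_asid_mask() == 0xfc0); /* old ASID_MASK */
		printf("inc=%#lx mask=%#lx\n", cpu_asid_inc(), cpu_asid_mask());
		return 0;
	}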
arch/mips/include/asm/mmu_context.h
@@ -65,37 +65,32 @@ extern unsigned long pgd_current[];
 		back_to_back_c0_hazard();				\
 		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-#define ASID_INC	0x40
-#define ASID_MASK	0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC	0x10
-#define ASID_MASK	0xff0
-
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC	0x1
-#define ASID_MASK	0xff
-
-#endif
+
+/*
+ *  All unused by hardware upper bits will be considered
+ *  as a software asid extension.
+ */
+static unsigned long asid_version_mask(unsigned int cpu)
+{
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+
+	return ~(asid_mask | (asid_mask - 1));
+}
+
+static unsigned long asid_first_version(unsigned int cpu)
+{
+	return ~asid_version_mask(cpu) + 1;
+}
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) \
+	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- *  All unused by hardware upper bits will be considered
- *  as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -104,7 +99,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 #ifdef CONFIG_KVM
@@ -113,7 +108,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 		local_flush_tlb_all();	/* start new asid cycle */
 #endif
 		if (!asid)		/* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -145,7 +140,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
-	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
 		get_new_mmu_context(next, cpu);
 	write_c0_entryhi(cpu_asid(cpu, next));
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
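
Worked through (an illustration, not part of the commit), the replacement arithmetic matches the old macros. With the default 8-bit ASID field, where asid_mask == 0xff:

	asid_mask | (asid_mask - 1) = 0xff | 0xfe = 0xff  /* every bit at or below the field */
	asid_version_mask(cpu)      = ~0xff               /* all higher bits hold the version */
	asid_first_version(cpu)     = 0xff + 1    = 0x100

With the R3000 values (asid_mask == 0xfc0), the OR with (asid_mask - 1) also covers the six shifted-out low bits: 0xfc0 | 0xfbf = 0xfff, giving a version mask of ~0xfff and a first version of 0x1000. The generation check in switch_mm(), (cpu_context ^ asid_cache) & asid_version_mask(cpu), is therefore nonzero exactly when the saved context comes from an older ASID cycle.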
arch/mips/kernel/traps.c
@@ -2136,7 +2136,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	}
 
 	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
arch/mips/kvm/tlb.c
@@ -49,12 +49,18 @@ EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_kernel_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+	int cpu = smp_processor_id();
+
+	return vcpu->arch.guest_user_asid[cpu] &
+			cpu_asid_mask(&cpu_data[cpu]);
 }
 
 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
@@ -78,7 +84,8 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	kvm_info("HOST TLBs:\n");
-	kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
+		 cpu_asid_mask(&current_cpu_data));
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -564,15 +571,15 @@ void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	asid += ASID_INC;
-	if (!(asid & ASID_MASK)) {
+	asid += cpu_asid_inc();
+	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 
 		kvm_local_flush_tlb_all();      /* start new asid cycle */
 
 		if (!asid)      /* fix version if needed */
-			asid = ASID_FIRST_VERSION;
+			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -627,6 +634,7 @@ static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
 /* Restore ASID once we are scheduled back after preemption */
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 	unsigned long flags;
 	int newasid = 0;
@@ -637,7 +645,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	local_irq_save(flags);
 
 	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
-						ASID_VERSION_MASK) {
+						asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -672,7 +680,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			 */
 			if (current->flags & PF_VCPU) {
 				write_c0_entryhi(vcpu->arch.
-						 preempt_entryhi & ASID_MASK);
+						 preempt_entryhi & asid_mask);
 				ehb();
 			}
 		} else {
@@ -687,11 +695,11 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
 				write_c0_entryhi(vcpu->arch.
 						 guest_kernel_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			else
 				write_c0_entryhi(vcpu->arch.
 						 guest_user_asid[cpu] &
-						 ASID_MASK);
+						 asid_mask);
 			ehb();
 		}
 	}
@@ -721,7 +729,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_mips_callbacks->vcpu_get_regs(vcpu);
 
 	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-	     ASID_VERSION_MASK)) {
+	     asid_version_mask(cpu))) {
 		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
 			  cpu_context(cpu, current->mm));
 		drop_mmu_context(current->mm, cpu);
arch/mips/lib/dump_tlb.c
@@ -73,6 +73,8 @@ static void dump_tlb(int first, int last)
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1, pa;
 	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
 #ifdef CONFIG_32BIT
 	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
 	int pwidth = xpa ? 11 : 8;
@@ -86,7 +88,7 @@ static void dump_tlb(int first, int last)
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & 0xff;
+	asid = s_entryhi & asidmask;
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
@@ -115,7 +117,7 @@ static void dump_tlb(int first, int last)
 		 * due to duplicate TLB entry.
 		 */
 		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
-		    (entryhi & 0xff) != asid)
+		    (entryhi & asidmask) != asid)
 			continue;
 
 		/*
@@ -126,9 +128,9 @@ static void dump_tlb(int first, int last)
 		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
-		printk("va=%0*lx asid=%02lx\n",
+		printk("va=%0*lx asid=%0*lx\n",
 		       vwidth, (entryhi & ~0x1fffUL),
-		       entryhi & 0xff);
+		       asidwidth, entryhi & asidmask);
 
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
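
The added asidwidth computation sizes the printed field to the ASID width in hex digits; worked examples (not from the patch):

	asidmask = 0xff:  ilog2(0xff) + 1  =  8 bits, DIV_ROUND_UP(8, 4)  = 2 digits (matches the old "%02lx")
	asidmask = 0xfc0: ilog2(0xfc0) + 1 = 12 bits, DIV_ROUND_UP(12, 4) = 3 digits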
arch/mips/lib/r3k_dump_tlb.c
@@ -29,9 +29,10 @@ static void dump_tlb(int first, int last)
 {
 	int	i;
 	unsigned int asid;
-	unsigned long entryhi, entrylo0;
+	unsigned long entryhi, entrylo0, asid_mask;
 
-	asid = read_c0_entryhi() & ASID_MASK;
+	asid_mask = cpu_asid_mask(&current_cpu_data);
+	asid = read_c0_entryhi() & asid_mask;
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i<<8);
@@ -46,7 +47,7 @@ static void dump_tlb(int first, int last)
 		/* Unused entries have a virtual address of KSEG0.  */
 		if ((entryhi & PAGE_MASK) != KSEG0 &&
 		    (entrylo0 & R3K_ENTRYLO_G ||
-		     (entryhi & ASID_MASK) == asid)) {
+		     (entryhi & asid_mask) == asid)) {
 			/*
 			 * Only print entries in use
 			 */
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
 			printk("va=%08lx asid=%08lx"
 			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
 			       entryhi & PAGE_MASK,
-			       entryhi & ASID_MASK,
+			       entryhi & asid_mask,
 			       entrylo0 & PAGE_MASK,
 			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
 			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
arch/mips/mm/tlb-r3k.c
@@ -43,7 +43,7 @@ static void local_flush_tlb_from(int entry)
 {
 	unsigned long old_ctx;
 
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 	write_c0_entrylo0(0);
 	while (entry < current_cpu_data.tlbsize) {
 		write_c0_index(entry << 8);
@@ -81,6 +81,7 @@ void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	struct mm_struct *mm = vma->vm_mm;
 	int cpu = smp_processor_id();
@@ -89,13 +90,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 #ifdef DEBUG_TLB
 		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-			cpu_context(cpu, mm) & ASID_MASK, start, end);
+			cpu_context(cpu, mm) & asid_mask, start, end);
 #endif
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size <= current_cpu_data.tlbsize) {
-			int oldpid = read_c0_entryhi() & ASID_MASK;
-			int newpid = cpu_context(cpu, mm) & ASID_MASK;
+			int oldpid = read_c0_entryhi() & asid_mask;
+			int newpid = cpu_context(cpu, mm) & asid_mask;
 
 			start &= PAGE_MASK;
 			end += PAGE_SIZE - 1;
@@ -159,6 +160,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	int cpu = smp_processor_id();
 
 	if (cpu_context(cpu, vma->vm_mm) != 0) {
@@ -168,10 +170,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
 		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+		newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
 		page &= PAGE_MASK;
 		local_irq_save(flags);
-		oldpid = read_c0_entryhi() & ASID_MASK;
+		oldpid = read_c0_entryhi() & asid_mask;
 		write_c0_entryhi(page | newpid);
 		BARRIER;
 		tlb_probe();
@@ -190,6 +192,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	unsigned long flags;
 	int idx, pid;
@@ -199,10 +202,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & asid_mask;
 
 #ifdef DEBUG_TLB
-	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -228,6 +231,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 		     unsigned long entryhi, unsigned long pagemask)
 {
+	unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
 	unsigned long flags;
 	unsigned long old_ctx;
 	static unsigned long wired = 0;
@@ -243,7 +247,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 		local_irq_save(flags);
 		/* Save old context and create impossible VPN2 value */
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = read_c0_entryhi() & asid_mask;
 		old_pagemask = read_c0_pagemask();
 		w = read_c0_wired();
 		write_c0_wired(w + 1);
@@ -266,7 +270,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
 		local_irq_save(flags);
-		old_ctx = read_c0_entryhi() & ASID_MASK;
+		old_ctx = read_c0_entryhi() & asid_mask;
 		write_c0_entrylo0(entrylo0);
 		write_c0_entryhi(entryhi);
 		write_c0_index(wired);
arch/mips/mm/tlb-r4k.c
@@ -304,7 +304,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	local_irq_save(flags);
 
 	htw_stop();
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
arch/mips/mm/tlb-r8k.c
@@ -194,7 +194,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;