Commit 4b6f99d3 authored by James Hogan, committed by Ralf Baechle

MIPS: mm: Don't do MTHC0 if XPA not present

Performing an MTHC0 instruction when XPA is not present triggers a
reserved instruction exception, so conditionalise the use of this
instruction when building the TLB handlers (build_update_entries()) and
in __update_tlb().

This allows an XPA kernel to run on non-XPA hardware without that
instruction implemented, just like it can run on XPA-capable hardware
without XPA in use (with the noxpa kernel argument) or with XPA not
configured in hardware.
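
The pattern applied throughout is simply to guard every XPA register
access on the CPU feature flag. A minimal sketch of the two guards,
reusing the helpers that appear in the hunks below (cpu_has_xpa, the
MTHC0-backed writex_c0_entrylo*() accessors, and the mips_xpa_disabled
flag set by the noxpa argument):

#ifdef CONFIG_XPA
	/* Only touch the upper EntryLo bits (MTHC0) when XPA really exists */
	if (cpu_has_xpa) {
		entrylo = pte.pte_low & _PFNX_MASK;
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif

	/* When assembling the TLB handlers, also honour the "noxpa" override */
	if (cpu_has_xpa && !mips_xpa_disabled)
		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);

In build_update_entries() this check is made once, when the TLB refill
handler is assembled at boot, so non-XPA hardware never has an MTHC0
emitted into its handlers at all.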

[paul.burton@imgtec.com:
  - Rebase atop other TLB work.
  - Add "mm" to subject.
  - Handle the __kmap_pgprot case.]

Fixes: c5b36783 ("MIPS: Add support for XPA.")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13124/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 2caa89b4
@@ -112,9 +112,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
 #ifdef CONFIG_XPA
-	entrylo = (pte.pte_low & _PFNX_MASK);
-	writex_c0_entrylo0(entrylo);
-	writex_c0_entrylo1(entrylo);
+	if (cpu_has_xpa) {
+		entrylo = (pte.pte_low & _PFNX_MASK);
+		writex_c0_entrylo0(entrylo);
+		writex_c0_entrylo1(entrylo);
+	}
 #endif
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
...
@@ -339,10 +339,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
 		ptep++;
 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
...
@@ -1030,17 +1030,21 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
-		uasm_i_lw(p, tmp, 0, ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, 0, ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		}
 		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
-		uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		}
 		return;
 	}
...