Commit d63e210e authored by Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "Random small fixes across the MIPS code."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: CMP: Fix physical core number calculation logic
  MIPS: JZ4740: Forward declare struct uart_port in header.
  MIPS: JZ4740: Fix '#include guard' in serial.h
  MIPS: hugetlbfs: Fix hazard between tlb write and pagemask restoration.
  MIPS: Restore pagemask after dumping the TLB.
  MIPS: Hugetlbfs: Handle huge pages correctly in pmd_bad()
  MIPS: R5000: Fix TLB hazard handling.
  MIPS: tlbex: Deal with re-definition of label
  MIPS: Make __{,n,u}delay declarations match definitions and generic delay.h
parents a0a6a39e 0cc40dac
@@ -13,9 +13,9 @@
 #include <linux/param.h>
 
-extern void __delay(unsigned int loops);
-extern void __ndelay(unsigned int ns);
-extern void __udelay(unsigned int us);
+extern void __delay(unsigned long loops);
+extern void __ndelay(unsigned long ns);
+extern void __udelay(unsigned long us);
 
 #define ndelay(ns) __ndelay(ns)
 #define udelay(us) __udelay(us)
...
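The declaration/definition mismatch matters on 64-bit MIPS, where a loop count wider than 32 bits would be narrowed at the call boundary. A minimal userspace sketch of the effect (the delay_narrow/delay_wide helpers are hypothetical stand-ins, not the kernel's __delay()):

/* Hedged sketch: a narrower parameter type silently drops the upper bits
 * of a 64-bit loop count; a matching "unsigned long" keeps the full value. */
#include <stdio.h>

static void delay_narrow(unsigned int loops)  { printf("spin %u iterations\n", loops); }
static void delay_wide(unsigned long loops)   { printf("spin %lu iterations\n", loops); }

int main(void)
{
	unsigned long loops = 0x100000001UL;	/* needs more than 32 bits (64-bit build) */

	delay_narrow(loops);	/* prints 1 on LP64: count truncated */
	delay_wide(loops);	/* prints 4294967297: count preserved */
	return 0;
}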
@@ -9,6 +9,7 @@
 #ifndef _ASM_PGTABLE_64_H
 #define _ASM_PGTABLE_64_H
 
+#include <linux/compiler.h>
 #include <linux/linkage.h>
 
 #include <asm/addrspace.h>
@@ -172,7 +173,19 @@ static inline int pmd_none(pmd_t pmd)
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
...
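A huge-page PMD is a leaf entry whose low bits hold page flags rather than a page-table pointer, so the old low-bits test would report every huge mapping as bad. A hedged sketch of the behavioural difference, using illustrative constants (SKETCH_PAGE_MASK, SKETCH_PAGE_HUGE are made up here, not the real MIPS bit layout):

/* Hedged sketch: illustrative bit values only. */
#include <stdio.h>

#define SKETCH_PAGE_MASK	(~0xfffUL)	/* assume 4 KiB pages */
#define SKETCH_PAGE_HUGE	0x010UL		/* hypothetical huge-page flag */

static int pmd_bad_old(unsigned long pmd)
{
	return (pmd & ~SKETCH_PAGE_MASK) != 0;
}

static int pmd_bad_new(unsigned long pmd)
{
	if (pmd & SKETCH_PAGE_HUGE)	/* leaf huge page: low bits are flags */
		return 0;
	return (pmd & ~SKETCH_PAGE_MASK) != 0;
}

int main(void)
{
	unsigned long huge_pmd = 0x40000000UL | SKETCH_PAGE_HUGE | 0x7;

	/* old test: 1 (wrongly "bad"); new test: 0 (accepted) */
	printf("old=%d new=%d\n", pmd_bad_old(huge_pmd), pmd_bad_new(huge_pmd));
	return 0;
}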
@@ -14,6 +14,9 @@
  */
 
 #ifndef __MIPS_JZ4740_SERIAL_H__
+#define __MIPS_JZ4740_SERIAL_H__
+
+struct uart_port;
 
 void jz4740_serial_out(struct uart_port *p, int offset, int value);
...
@@ -97,7 +97,7 @@ static void cmp_init_secondary(void)
 
 	/* Enable per-cpu interrupts: platform specific */
 
-	c->core = (read_c0_ebase() >> 1) & 0xff;
+	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
 	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
 #endif
...
@@ -15,13 +15,17 @@
 #include <asm/compiler.h>
 #include <asm/war.h>
 
-inline void __delay(unsigned int loops)
+void __delay(unsigned long loops)
 {
 	__asm__ __volatile__ (
 	"	.set	noreorder				\n"
 	"	.align	3					\n"
 	"1:	bnez	%0, 1b					\n"
+#if __SIZEOF_LONG__ == 4
 	"	subu	%0, 1					\n"
+#else
+	"	dsubu	%0, 1					\n"
+#endif
 	"	.set	reorder					\n"
 	: "=r" (loops)
 	: "0" (loops));
...
@@ -50,8 +50,9 @@ static void dump_tlb(int first, int last)
 {
 	unsigned long s_entryhi, entryhi, asid;
 	unsigned long long entrylo0, entrylo1;
-	unsigned int s_index, pagemask, c0, c1, i;
+	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
 
+	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
 	asid = s_entryhi & 0xff;
@@ -103,6 +104,7 @@ static void dump_tlb(int first, int last)
 
 	write_c0_entryhi(s_entryhi);
 	write_c0_index(s_index);
+	write_c0_pagemask(s_pagemask);
 }
 
 void dump_tlb_all(void)
...
@@ -320,6 +320,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 			tlb_write_random();
 		else
 			tlb_write_indexed();
+		tlbw_use_hazard();
 		write_c0_pagemask(PM_DEFAULT_MASK);
 	} else
 #endif
...
@@ -148,8 +148,8 @@ enum label_id {
 	label_leave,
 	label_vmalloc,
 	label_vmalloc_done,
-	label_tlbw_hazard,
-	label_split,
+	label_tlbw_hazard_0,
+	label_split = label_tlbw_hazard_0 + 8,
 	label_tlbl_goaround1,
 	label_tlbl_goaround2,
 	label_nopage_tlbl,
@@ -167,7 +167,7 @@ UASM_L_LA(_second_part)
 UASM_L_LA(_leave)
 UASM_L_LA(_vmalloc)
 UASM_L_LA(_vmalloc_done)
-UASM_L_LA(_tlbw_hazard)
+/* _tlbw_hazard_x is handled differently. */
 UASM_L_LA(_split)
 UASM_L_LA(_tlbl_goaround1)
 UASM_L_LA(_tlbl_goaround2)
@@ -181,6 +181,30 @@ UASM_L_LA(_large_segbits_fault)
 UASM_L_LA(_tlb_huge_update)
 #endif
 
+static int __cpuinitdata hazard_instance;
+
+static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
+		return;
+	default:
+		BUG();
+	}
+}
+
+static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+{
+	switch (instance) {
+	case 0 ... 7:
+		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /*
  * For debug purposes.
  */
@@ -478,21 +502,28 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		 * This branch uses up a mtc0 hazard nop slot and saves
 		 * two nops after the tlbw instruction.
 		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
+		uasm_bgezl_hazard(p, r, hazard_instance);
 		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
+		uasm_bgezl_label(l, p, hazard_instance);
+		hazard_instance++;
 		uasm_i_nop(p);
 		break;
 
 	case CPU_R4600:
 	case CPU_R4700:
-	case CPU_R5000:
-	case CPU_R5000A:
 		uasm_i_nop(p);
 		tlbw(p);
 		uasm_i_nop(p);
 		break;
 
+	case CPU_R5000:
+	case CPU_R5000A:
+	case CPU_NEVADA:
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		uasm_i_nop(p); /* QED specifies 2 nops hazard */
+		tlbw(p);
+		break;
+
 	case CPU_R4300:
 	case CPU_5KC:
 	case CPU_TX49XX:
@@ -526,17 +557,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		tlbw(p);
 		break;
 
-	case CPU_NEVADA:
-		uasm_i_nop(p); /* QED specifies 2 nops hazard */
-		/*
-		 * This branch uses up a mtc0 hazard nop slot and saves
-		 * a nop after the tlbw instruction.
-		 */
-		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
-		tlbw(p);
-		uasm_l_tlbw_hazard(l, *p);
-		break;
-
 	case CPU_RM7000:
 		uasm_i_nop(p);
 		uasm_i_nop(p);
...
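The tlbex change matters because build_tlb_write_entry() can be invoked more than once (for example for the huge-page path), and each invocation previously defined the same uasm label. A hedged toy model of the problem and the fix, not the uasm API itself (define_label and MAX_LABELS are made up for illustration):

/* Hedged model: defining one label id twice is ambiguous, so each
 * generated fragment takes its own id from a running instance counter,
 * mirroring label_tlbw_hazard_0 + hazard_instance in the real code. */
#include <assert.h>
#include <stdio.h>

#define MAX_LABELS 16

static unsigned long label_addr[MAX_LABELS];
static int label_defined[MAX_LABELS];

static void define_label(int id, unsigned long addr)
{
	assert(!label_defined[id]);	/* a second definition would be ambiguous */
	label_defined[id] = 1;
	label_addr[id] = addr;
}

int main(void)
{
	int hazard_instance = 0;

	/* two generated TLB-write fragments, each resolving a distinct label */
	define_label(hazard_instance++, 0x1000);
	define_label(hazard_instance++, 0x2000);
	printf("labels at %#lx and %#lx\n", label_addr[0], label_addr[1]);
	return 0;
}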