Commit a3c4946d authored by Ralf Baechle

[MIPS] SB1: Fix interrupt disable hazard.

The SB1 core has a three-cycle interrupt disable hazard, but we were
wrongly treating it as fully interlocked.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 3a2f7357
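For context: the bug is that irq_disable_hazard expanded to nothing on SB1, yet on
this core a mtc0 clearing Status.IE only takes effect up to three cycles later, so
an interrupt could still be taken after "disabling" interrupts. A minimal sketch of
a consumer of the barrier follows, modelled on the kernel's local_irq_disable() of
this era; the function name is hypothetical and the snippet assumes the
_ssnop/irq_disable_hazard assembler macros from hazards.h are in scope:

/*
 * Illustrative only -- not part of this commit.  Shows why
 * irq_disable_hazard must emit real no-ops on SB1 instead of
 * expanding to nothing: the mtc0 that clears Status.IE is not
 * guaranteed to have taken effect until three cycles later.
 */
static inline void example_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	noat					\n"
	"	mfc0	$1, $12		# read c0_status	\n"
	"	ori	$1, 1					\n"
	"	xori	$1, 1		# clear the IE bit	\n"
	"	mtc0	$1, $12		# irqs go off ...	\n"
	"	irq_disable_hazard	# ... 3 ssnops on SB1	\n"
	"	.set	pop					\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}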
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -3,7 +3,9 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) MIPS Technologies, Inc.
+ *   written by Ralf Baechle <ralf@linux-mips.org>
  */
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
@@ -74,8 +76,7 @@
 #define irq_disable_hazard
 	_ehb
 
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
-      defined(CONFIG_CPU_SB1)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
@@ -99,13 +100,13 @@
 #else /* __ASSEMBLY__ */
 
 __asm__(
-	"	.macro	_ssnop					\n\t"
-	"	sll	$0, $0, 1				\n\t"
-	"	.endm						\n\t"
-	"							\n\t"
-	"	.macro	_ehb					\n\t"
-	"	sll	$0, $0, 3				\n\t"
-	"	.endm						\n\t");
+	"	.macro	_ssnop					\n"
+	"	sll	$0, $0, 1				\n"
+	"	.endm						\n"
+	"							\n"
+	"	.macro	_ehb					\n"
+	"	sll	$0, $0, 3				\n"
+	"	.endm						\n");
 
 #ifdef CONFIG_CPU_RM9000
@@ -117,17 +118,21 @@ __asm__(
 #define mtc0_tlbw_hazard()						\
 	__asm__ __volatile__(						\
-		".set\tmips32\n\t"					\
-		"_ssnop; _ssnop; _ssnop; _ssnop\n\t"			\
-		".set\tmips0")
+	"	.set	mips32					\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	.set	mips0					\n")
 
 #define tlbw_use_hazard()						\
 	__asm__ __volatile__(						\
-		".set\tmips32\n\t"					\
-		"_ssnop; _ssnop; _ssnop; _ssnop\n\t"			\
-		".set\tmips0")
-
-#define back_to_back_c0_hazard()	do { } while (0)
+	"	.set	mips32					\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	_ssnop						\n"	\
+	"	.set	mips0					\n")
 
 #else
@@ -136,15 +141,25 @@ __asm__(
  */
 #define mtc0_tlbw_hazard()						\
 	__asm__ __volatile__(						\
-		".set noreorder\n\t"					\
-		"nop; nop; nop; nop; nop; nop;\n\t"			\
-		".set reorder\n\t")
+	"	.set noreorder					\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	.set reorder					\n")
 
 #define tlbw_use_hazard()						\
 	__asm__ __volatile__(						\
-		".set noreorder\n\t"					\
-		"nop; nop; nop; nop; nop; nop;\n\t"			\
-		".set reorder\n\t")
+	"	.set noreorder					\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	nop						\n"	\
+	"	.set reorder					\n")
 
 #endif
@@ -156,49 +171,26 @@ __asm__(
 
 #ifdef CONFIG_CPU_MIPSR2
 
-__asm__(
-	"	.macro\tirq_enable_hazard			\n\t"
-	"	_ehb						\n\t"
-	"	.endm						\n\t"
-	"							\n\t"
-	"	.macro\tirq_disable_hazard			\n\t"
-	"	_ehb						\n\t"
-	"	.endm						\n\t"
-	"							\n\t"
-	"	.macro\tback_to_back_c0_hazard			\n\t"
-	"	_ehb						\n\t"
-	"	.endm");
-
-#define irq_enable_hazard()						\
-	__asm__ __volatile__(						\
-	"irq_enable_hazard")
-
-#define irq_disable_hazard()						\
-	__asm__ __volatile__(						\
-	"irq_disable_hazard")
-
-#define back_to_back_c0_hazard()					\
-	__asm__ __volatile__(						\
-	"back_to_back_c0_hazard")
+__asm__("	.macro	irq_enable_hazard			\n"
+	"	_ehb						\n"
+	"	.endm						\n"
+	"							\n"
+	"	.macro	irq_disable_hazard			\n"
+	"	_ehb						\n"
+	"	.endm						\n");
 
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
-      defined(CONFIG_CPU_SB1)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 __asm__(
-	"	.macro\tirq_enable_hazard			\n\t"
-	"	.endm						\n\t"
-	"							\n\t"
-	"	.macro\tirq_disable_hazard			\n\t"
-	"	.endm");
-
-#define irq_enable_hazard()	do { } while (0)
-#define irq_disable_hazard()	do { } while (0)
-#define back_to_back_c0_hazard()	do { } while (0)
+	"	.macro	irq_enable_hazard			\n"
+	"	.endm						\n"
+	"							\n"
+	"	.macro	irq_disable_hazard			\n"
+	"	.endm						\n");
 
 #else
@@ -209,29 +201,63 @@ __asm__(
  */
 __asm__(
-	"	#						\n\t"
-	"	# There is a hazard but we do not care		\n\t"
-	"	#						\n\t"
-	"	.macro\tirq_enable_hazard			\n\t"
-	"	.endm						\n\t"
-	"							\n\t"
-	"	.macro\tirq_disable_hazard			\n\t"
-	"	_ssnop; _ssnop; _ssnop				\n\t"
-	"	.endm");
+	"	#						\n"
+	"	# There is a hazard but we do not care		\n"
+	"	#						\n"
+	"	.macro\tirq_enable_hazard			\n"
+	"	.endm						\n"
+	"							\n"
+	"	.macro\tirq_disable_hazard			\n"
+	"	_ssnop						\n"
+	"	_ssnop						\n"
+	"	_ssnop						\n"
+	"	.endm						\n");
 
-#define irq_enable_hazard()	do { } while (0)
+#endif
+
+#define irq_enable_hazard()						\
+	__asm__ __volatile__("irq_enable_hazard")
 
 #define irq_disable_hazard()						\
-	__asm__ __volatile__(						\
-	"irq_disable_hazard")
+	__asm__ __volatile__("irq_disable_hazard")
 
-#define back_to_back_c0_hazard()					\
-	__asm__ __volatile__(						\
-	"	.set noreorder					\n"	\
-	"	nop; nop; nop					\n"	\
-	"	.set reorder					\n")
 
-#endif
+/*
+ * Back-to-back hazards -
+ *
+ * What is needed to separate a move to cp0 from a subsequent read from the
+ * same cp0 register?
+ */
+#ifdef CONFIG_CPU_MIPSR2
+
+__asm__("	.macro	back_to_back_c0_hazard			\n"
+	"	_ehb						\n"
+	"	.endm						\n");
+
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
+      defined(CONFIG_CPU_SB1)
+
+__asm__("	.macro	back_to_back_c0_hazard			\n"
+	"	.endm						\n");
+
+#else
+
+__asm__("	.macro	back_to_back_c0_hazard			\n"
+	"	.set	noreorder				\n"
+	"	_ssnop						\n"
+	"	_ssnop						\n"
+	"	_ssnop						\n"
+	"	.set	reorder					\n"
+	"	.endm");
+
+#endif
+
+#define back_to_back_c0_hazard()					\
+	__asm__ __volatile__("back_to_back_c0_hazard")
+
+
+/*
+ * Instruction execution hazard
+ */
 
 #ifdef CONFIG_CPU_MIPSR2
 /*
  * gcc has a tradition of misscompiling the previous construct using the
......
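And a hypothetical use site for the back_to_back_c0_hazard() barrier introduced
above (illustrative, not from this commit; read_c0_status()/write_c0_status()
are the usual asm-mips cp0 accessor macros):

/*
 * Illustrative only.  A cp0 write followed by a read of the same
 * register must be separated by back_to_back_c0_hazard(), which this
 * patch makes _ehb on MIPS32R2, empty on R10000/RM9000/SB1, and three
 * ssnops elsewhere.
 */
static inline unsigned int example_set_status_bits(unsigned int set)
{
	unsigned int status = read_c0_status();

	write_c0_status(status | set);
	back_to_back_c0_hazard();	/* separate mtc0 from the mfc0 below */

	return read_c0_status();
}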