Commit e8b50608 authored by Linus Torvalds

Merge tag 'mips_fixes_5.0_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS fixes from Paul Burton:
 "A batch of MIPS fixes for 5.0, nothing too scary.

   - A workaround for a Loongson 3 CPU bug is the biggest change, but
     still fairly straightforward. It adds extra memory barriers (sync
     instructions) around ll/sc atomics to avoid an erratum that can
     break atomicity (a simplified sketch follows the commit metadata
     below).

   - Loongson64 also sees a fix for powering off some systems which
     would incorrectly reboot rather than waiting for the power down
     sequence to complete.

   - We have DT fixes for the Ingenic JZ4740 SoC & the JZ4780-based Ci20
     board, and a DT warning fix for the Nexsys4/MIPSfpga board.

   - The Cavium Octeon platform sees a further fix to the behaviour of
     the pcie_disable command line argument that was introduced in v3.3.

   - The VDSO, introduced in v4.4, sees build fixes for configurations
     of GCC that were built using the --with-fp-32= flag to specify a
     default 32-bit floating point ABI.

   - get_frame_info() sees a fix for configurations with
     CONFIG_KALLSYMS=n, for which it previously always returned an
     error.

   - If the MIPS Coherence Manager (CM) reports an error then we'll now
     clear that error correctly so that the GCR_ERROR_CAUSE register
     will be updated with information about any future errors"

* tag 'mips_fixes_5.0_3' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux:
  mips: cm: reprime error cause
  mips: loongson64: remove unreachable(), fix loongson_poweroff().
  MIPS: Remove function size check in get_frame_info()
  MIPS: Use lower case for addresses in nexys4ddr.dts
  MIPS: Loongson: Introduce and use loongson_llsc_mb()
  MIPS: VDSO: Include $(ccflags-vdso) in o32,n32 .lds builds
  MIPS: VDSO: Use same -m%-float cflag as the kernel proper
  MIPS: OCTEON: don't set octeon_dma_bar_type if PCI is disabled
  DTS: CI20: Fix bugs in ci20's device tree.
  MIPS: DTS: jz4740: Correct interrupt number of DMA core
parents e5a8a116 05dc6001
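
A minimal sketch of the Loongson 3 workaround described in the first bullet above, before the diff itself. This is illustrative only, not code from the merge: loongson_llsc_mb() is shown pre-expanded to a plain sync instruction (the diff spells this __WEAK_LLSC_MB), and sketch_atomic_add() is a hypothetical reduction of the kernel's atomic_add() for a MIPS target:

#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
#define loongson_llsc_mb()	__asm__ __volatile__("sync" : : : "memory")
#else
#define loongson_llsc_mb()	do { } while (0)
#endif

/* Hypothetical, simplified ll/sc add: the sync keeps earlier memory
 * accesses from being reordered past the ll on affected cores. */
static inline void sketch_atomic_add(int i, volatile int *v)
{
	int temp;

	loongson_llsc_mb();			/* barrier before the ll */
	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"	/* load-linked */
	"	addu	%0, %2		\n"
	"	sc	%0, %1		\n"	/* store-conditional, 0 on failure */
	"	beqz	%0, 1b		\n"	/* retry if the sc failed */
	: "=&r" (temp), "+R" (*v)
	: "Ir" (i));
}

Without the leading sync, an earlier memory access could execute between the ll and the sc on affected cores, which is exactly the window in which the erratum lets the sc succeed erroneously.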
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT
 	  please say 'N' here. If you want a high-performance kernel to run on
 	  new Loongson 3 machines only, please say 'Y' here.
 
+config CPU_LOONGSON3_WORKAROUNDS
+	bool "Old Loongson 3 LLSC Workarounds"
+	default y if SMP
+	depends on CPU_LOONGSON3
+	help
+	  Loongson 3 processors have the llsc issues which require workarounds.
+	  Without workarounds the system may hang unexpectedly.
+
+	  Newer Loongson 3 will fix these issues and no workarounds are needed.
+	  The workarounds have no significant side effect on them but may
+	  decrease the performance of the system so this option should be
+	  disabled unless the kernel is intended to be run on old systems.
+
+	  If unsure, please say Y.
+
 config CPU_LOONGSON2E
 	bool "Loongson 2E"
 	depends on SYS_HAS_CPU_LOONGSON2E
...
@@ -76,7 +76,7 @@ &uart3 {
 	status = "okay";
 
 	pinctrl-names = "default";
-	pinctrl-0 = <&pins_uart2>;
+	pinctrl-0 = <&pins_uart3>;
 };
 
 &uart4 {
@@ -196,9 +196,9 @@ pins_uart1: uart1 {
 		bias-disable;
 	};
 
-	pins_uart2: uart2 {
-		function = "uart2";
-		groups = "uart2-data", "uart2-hwflow";
+	pins_uart3: uart3 {
+		function = "uart3";
+		groups = "uart3-data", "uart3-hwflow";
 		bias-disable;
 	};
...
@@ -161,7 +161,7 @@ dmac: dma-controller@13020000 {
 	#dma-cells = <2>;
 
 	interrupt-parent = <&intc>;
-	interrupts = <29>;
+	interrupts = <20>;
 
 	clocks = <&cgu JZ4740_CLK_DMA>;
...
@@ -90,11 +90,11 @@ axi_uart16550: serial@10400000 {
 		interrupts = <0>;
 	};
 
-	axi_i2c: i2c@10A00000 {
+	axi_i2c: i2c@10a00000 {
 		compatible = "xlnx,xps-iic-2.00.a";
 		interrupt-parent = <&axi_intc>;
 		interrupts = <4>;
-		reg = < 0x10A00000 0x10000 >;
+		reg = < 0x10a00000 0x10000 >;
 		clocks = <&ext>;
 		xlnx,clk-freq = <0x5f5e100>;
 		xlnx,family = "Artix7";
@@ -106,9 +106,9 @@ axi_i2c: i2c@10A00000 {
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		ad7420@4B {
+		ad7420@4b {
 			compatible = "adi,adt7420";
-			reg = <0x4B>;
+			reg = <0x4b>;
 		};
 	} ;
 };
...
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)	\
 	if (kernel_uses_llsc) {						\
 		int temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
 	if (kernel_uses_llsc) {						\
 		int temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
 	if (kernel_uses_llsc) {						\
 		int temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)	\
 	if (kernel_uses_llsc) {						\
 		long temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 	if (kernel_uses_llsc) {						\
 		long temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
 	if (kernel_uses_llsc) {						\
 		long temp;						\
 									\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	"MIPS_ISA_LEVEL"		\n"	\
...
@@ -222,6 +222,42 @@
 #define __smp_mb__before_atomic()	__smp_mb__before_llsc()
 #define __smp_mb__after_atomic()	smp_llsc_mb()
 
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or pref) in between an ll & sc can cause the sc instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the ll in program order may actually
+ *    be executed after the ll - this is the reordering case.
+ *
+ *    In order to avoid this we need to place a memory barrier (ie. a sync
+ *    instruction) prior to every ll instruction, in between it & any earlier
+ *    memory access instructions. Many of these cases are already covered by
+ *    smp_mb__before_llsc() but for the remaining cases, typically ones in
+ *    which multiple CPUs may operate on a memory location but ordering is not
+ *    usually guaranteed, we use loongson_llsc_mb() below.
+ *
+ *    This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an ll & sc with a target outside
+ *    of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
+ *    or similar, then misprediction of the branch may allow speculative
+ *    execution of memory accesses from outside of the ll-sc loop.
+ *
+ *    In order to avoid this we need a memory barrier (ie. a sync instruction)
+ *    at each affected branch target, for which we also use loongson_llsc_mb()
+ *    defined below.
+ *
+ *    This case affects all current Loongson 3 CPUs.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+#define loongson_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#else
+#define loongson_llsc_mb()	do { } while (0)
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASM_BARRIER_H */
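
To make the two placements concrete, here is a condensed cmpxchg-style loop written in the style of the kernel's __cmpxchg_asm(). It is a hypothetical sketch, not code from this merge, and it assumes the loongson_llsc_mb() definition added above:

static inline unsigned long
sketch_cmpxchg(volatile unsigned long *m, unsigned long old, unsigned long new)
{
	unsigned long ret;

	loongson_llsc_mb();		/* case 1: sync before the ll */
	__asm__ __volatile__(
	"	.set	push			\n"
	"	.set	noat			\n"
	"1:	ll	%0, %1			\n"
	"	bne	%0, %z2, 2f		\n"	/* exit branch leaves the loop */
	"	move	$1, %z3			\n"
	"	sc	$1, %1			\n"
	"	beqz	$1, 1b			\n"
	"	.set	pop			\n"
	"2:					\n"
	: "=&r" (ret), "+R" (*m)
	: "Jr" (old), "Jr" (new)
	: "memory");
	loongson_llsc_mb();		/* case 2: sync at the out-of-loop branch target */

	return ret;
}

The first barrier covers the reordering case; the second sits at label 2, the branch target outside the ll/sc loop, covering the speculation case. The futex change later in this series places its barrier at the same kind of loop exit.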
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# set_bit	\n"
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push				\n"
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		: "ir" (~(1UL << bit)));
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# clear_bit	\n"
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push				\n"
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
 
+		loongson_llsc_mb();
 		do {
 			__asm__ __volatile__(
 			"	.set	push				\n"
...
@@ -50,6 +50,7 @@
 		  "i" (-EFAULT)						\
 		: "memory");						\
 	} else if (cpu_has_llsc) {					\
+		loongson_llsc_mb();					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		  "i" (-EFAULT)
 		: "memory");
 	} else if (cpu_has_llsc) {
+		loongson_llsc_mb();
 		__asm__ __volatile__(
 		"# futex_atomic_cmpxchg_inatomic			\n"
 		"	.set	push					\n"
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
+		loongson_llsc_mb();
 	} else
 		return -ENOSYS;
...
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
 	} else if (kernel_uses_llsc) {
+		loongson_llsc_mb();
 		__asm__ __volatile__ (
 		"	.set	push					\n"
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		"	.set	pop					\n"
 		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
 		: [global] "r" (page_global));
+		loongson_llsc_mb();
 	}
 #else /* !CONFIG_SMP */
 	if (pte_none(*buddy))
...
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
 	}
 
 	/* reprime cause register */
-	write_gcr_error_cause(0);
+	write_gcr_error_cause(cm_error);
 }
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
 static int get_frame_info(struct mips_frame_info *info)
 {
 	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
-	union mips_instruction insn, *ip, *ip_end;
+	union mips_instruction insn, *ip;
 	const unsigned int max_insns = 128;
 	unsigned int last_insn_size = 0;
 	unsigned int i;
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
 	if (!ip)
 		goto err;
 
-	ip_end = (void *)ip + info->func_size;
-
-	for (i = 0; i < max_insns && ip < ip_end; i++) {
+	for (i = 0; i < max_insns; i++) {
 		ip = (void *)ip + last_insn_size;
+
 		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
 			insn.word = ip->halfword[0] << 16;
 			last_insn_size = 2;
...
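
Why the removed bound was wrong: with CONFIG_KALLSYMS=n the kernel cannot look up a function's size, so info->func_size stays 0, ip_end equals ip, and the scan loop body never runs; get_frame_info() then reports an error for every function. A toy, self-contained demonstration of that degenerate loop (hypothetical names, not kernel code):

#include <stdio.h>

int main(void)
{
	const unsigned int max_insns = 128;
	unsigned int func_size = 0;	/* what the size lookup yields with KALLSYMS=n */
	unsigned int scanned = 0;

	unsigned char code[512];	/* stand-in for the function body */
	unsigned char *ip = code;
	unsigned char *ip_end = ip + func_size;	/* == ip when func_size is 0 */

	/* The old bound: "ip < ip_end" is false on the very first test. */
	for (unsigned int i = 0; i < max_insns && ip < ip_end; i++) {
		scanned++;
		ip += 4;
	}

	printf("instructions scanned: %u\n", scanned);	/* prints 0 */
	return 0;
}

Bounding the scan by max_insns alone (128 instructions) restores the prologue scan regardless of whether kallsyms can supply a size.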
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
 endif
 
 cflags-$(CONFIG_CPU_LOONGSON3)	+= -Wa,--trap
+
+#
+# Some versions of binutils, not currently mainline as of 2019/02/04, support
+# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
+# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a
+# description).
+#
+# We disable this in order to prevent the assembler meddling with the
+# instruction that labels refer to, ie. if we label an ll instruction:
+#
+# 1: ll v0, 0(a0)
+#
+# ...then with the assembler fix applied the label may actually point at a sync
+# instruction inserted by the assembler, and if we were using the label in an
+# exception table the table would no longer contain the address of the ll
+# instruction.
+#
+# Avoid this by explicitly disabling that assembler behaviour. If upstream
+# binutils does not merge support for the flag then we can revisit & remove
+# this later - for now it ensures vendor toolchains don't cause problems.
+#
+cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
+
 #
 # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
 # as MIPS64 R2; older versions as just R1. This leaves the possibility open
...
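
The hazard described in that comment can be sketched with kernel-style inline asm. This is a hypothetical fragment assuming a 32-bit MIPS target (a 64-bit kernel would use a pointer-sized directive rather than .word); it is not code from this merge:

/* The __ex_table entry records the address of label 1, which must be the
 * ll itself for the fault handler to match the faulting PC. If the
 * assembler quietly emits "sync; ll" for that line, the label lands on
 * the sync and the lookup misses. */
static inline unsigned long load_linked_checked(unsigned long *addr, int *err)
{
	unsigned long val;

	__asm__ __volatile__(
	"1:	ll	%0, %2			\n"
	"2:					\n"
	"	.section .fixup, \"ax\"		\n"
	"3:	li	%1, 1			\n"	/* flag the fault */
	"	j	2b			\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.word	1b, 3b			\n"	/* must be the ll's address */
	"	.previous			\n"
	: "=&r" (val), "+r" (*err)
	: "R" (*addr));

	return val;
}

With the assembler fix active, label 1 would point at the inserted sync, the __ex_table entry would record the sync's address, and a fault on the ll would never find its fixup.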
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
 {
 #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
 	mach_prepare_shutdown();
-	unreachable();
+
+	/*
+	 * It needs a wait loop here, but mips/kernel/reset.c already calls
+	 * a generic delay loop, machine_hang(), so simply return.
+	 */
+	return;
 #else
 	void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
...
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		 * to mimic that here by taking a load/istream page
 		 * fault.
 		 */
+		if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+			uasm_i_sync(p, 0);
 		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
 		uasm_i_jr(p, ptr);
 
@@ -1646,6 +1648,8 @@ static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
+	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+		uasm_i_sync(p, 0);
 # ifdef CONFIG_PHYS_ADDR_T_64BIT
 	if (cpu_has_64bits)
 		uasm_i_lld(p, pte, 0, ptr);
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void)
 #endif
 
 	uasm_l_nopage_tlbl(&l, p);
+	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+		uasm_i_sync(&p, 0);
 	build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
 	if ((unsigned long)tlb_do_page_fault_0 & 1) {
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void)
 #endif
 
 	uasm_l_nopage_tlbs(&l, p);
+	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+		uasm_i_sync(&p, 0);
 	build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
 	if ((unsigned long)tlb_do_page_fault_1 & 1) {
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void)
 #endif
 
 	uasm_l_nopage_tlbm(&l, p);
+	if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS))
+		uasm_i_sync(&p, 0);
 	build_restore_work_registers(&p);
 #ifdef CONFIG_CPU_MICROMIPS
 	if ((unsigned long)tlb_do_page_fault_1 & 1) {
...
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
 	if (octeon_has_feature(OCTEON_FEATURE_PCIE))
 		return 0;
 
+	if (!octeon_is_pci_host()) {
+		pr_notice("Not in host mode, PCI Controller not initialized\n");
+		return 0;
+	}
+
 	/* Point pcibios_map_irq() to the PCI version of it */
 	octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
 	else
 		octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
 
-	if (!octeon_is_pci_host()) {
-		pr_notice("Not in host mode, PCI Controller not initialized\n");
-		return 0;
-	}
-
 	/* PCI I/O and PCI MEM values */
 	set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
 	ioport_resource.start = 0;
...
@@ -8,6 +8,7 @@ ccflags-vdso := \
 	$(filter -E%,$(KBUILD_CFLAGS)) \
 	$(filter -mmicromips,$(KBUILD_CFLAGS)) \
 	$(filter -march=%,$(KBUILD_CFLAGS)) \
+	$(filter -m%-float,$(KBUILD_CFLAGS)) \
 	-D__VDSO__
 
 ifdef CONFIG_CC_IS_CLANG
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
 
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
 	$(call cmd,force_checksrc)
 	$(call if_changed_rule,cc_o_c)
 
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
 	$(call if_changed_dep,cpp_lds_S)
...