Commit cc10ad25 authored by Linus Torvalds

Merge tag 'mips_4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Paul Burton:

 - kexec support for the generic MIPS platform when running on a CPU
   including the MIPS Coherence Manager & related hardware.

 - Improvements to the definition of memory barriers used around MMIO
   accesses, and fixes in their use.

 - Switch to CONFIG_NO_BOOTMEM from Mike Rapoport, finally dropping
   reliance on the old bootmem code.

 - A number of fixes & improvements for Loongson 3 systems.

 - DT & config updates for the Microsemi Ocelot platform.

 - Workaround to enable USB power on the Netgear WNDR3400v3.

 - Various cleanups & fixes.

* tag 'mips_4.20' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (51 commits)
  MIPS: Cleanup DSP ASE detection
  MIPS: dts: Change upper case to lower case
  MIPS: generic: Add Network, SPI and I2C to ocelot_defconfig
  MIPS: Loongson-3: Fix BRIDGE irq delivery problem
  MIPS: Loongson-3: Fix CPU UART irq delivery problem
  MIPS: Remove unused PREF, PREFE & PREFX macros
  MIPS: lib: Use kernel_pref & user_pref in memcpy()
  MIPS: Remove unused CAT macro
  MIPS: Add kernel_pref & user_pref helpers
  MIPS: Remove unused TTABLE macro
  MIPS: Remove unused PIC macros
  MIPS: Remove unused MOVN & MOVZ macros
  MIPS: Provide actually relaxed MMIO accessors
  MIPS: Enforce strong ordering for MMIO accessors
  MIPS: Correct `mmiowb' barrier for `wbflush' platforms
  MIPS: Define MMIO ordering barriers
  MIPS: mscc: add PCB120 to the ocelot fitImage
  MIPS: mscc: add DT for Ocelot PCB120
  MIPS: memset: Limit excessive `noreorder' assembly mode use
  MIPS: memset: Fix CPU_DADDI_WORKAROUNDS `small_fixup' regression
  ...
parents ec9c1664 edbb4233
......@@ -21,6 +21,7 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_LIB_ASHLDI3
......@@ -28,7 +29,6 @@ config MIPS
select GENERIC_LIB_CMPDI2
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
......@@ -78,6 +78,7 @@ config MIPS
select RTC_LIB if !MACH_LOONGSON64
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
select NO_BOOTMEM
menu "Machine selection"
......@@ -132,6 +133,7 @@ config MIPS_GENERIC
select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USE_OF
select UHI_BOOT
help
Select this to build a kernel which aims to support multiple boards,
generally using a flattened device tree passed from the bootloader
......@@ -1149,6 +1151,7 @@ config NO_IOPORT_MAP
config GENERIC_CSUM
bool
default y if !CPU_HAS_LOAD_STORE_LR
config GENERIC_ISA_DMA
bool
......@@ -1367,6 +1370,7 @@ config CPU_LOONGSON3
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_PGD_C0_CONTEXT
......@@ -1443,6 +1447,7 @@ config CPU_MIPS32_R1
bool "MIPS32 Release 1"
depends on SYS_HAS_CPU_MIPS32_R1
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
help
......@@ -1460,6 +1465,7 @@ config CPU_MIPS32_R2
bool "MIPS32 Release 2"
depends on SYS_HAS_CPU_MIPS32_R2
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
......@@ -1478,7 +1484,6 @@ config CPU_MIPS32_R6
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select HAVE_KVM
select MIPS_O32_FP64_SUPPORT
help
......@@ -1491,6 +1496,7 @@ config CPU_MIPS64_R1
bool "MIPS64 Release 1"
depends on SYS_HAS_CPU_MIPS64_R1
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1510,6 +1516,7 @@ config CPU_MIPS64_R2
bool "MIPS64 Release 2"
depends on SYS_HAS_CPU_MIPS64_R2
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1531,7 +1538,6 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
......@@ -1544,6 +1550,7 @@ config CPU_R3000
bool "R3000"
depends on SYS_HAS_CPU_R3000
select CPU_HAS_WB
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
help
......@@ -1558,12 +1565,14 @@ config CPU_TX39XX
bool "R39XX"
depends on SYS_HAS_CPU_TX39XX
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_HAS_LOAD_STORE_LR
config CPU_VR41XX
bool "R41xx"
depends on SYS_HAS_CPU_VR41XX
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_HAS_LOAD_STORE_LR
help
This option selects support for the NEC VR4100 series of processors.
Only choose this option if you have one of these processors as a
......@@ -1575,6 +1584,7 @@ config CPU_R4300
depends on SYS_HAS_CPU_R4300
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R4300-series processors.
......@@ -1584,6 +1594,7 @@ config CPU_R4X00
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R4000-series processors other than 4300, including
the R4000, R4400, R4600, and 4700.
......@@ -1592,6 +1603,7 @@ config CPU_TX49XX
bool "R49XX"
depends on SYS_HAS_CPU_TX49XX
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
......@@ -1602,6 +1614,7 @@ config CPU_R5000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R5000-series processors other than the Nevada.
......@@ -1611,6 +1624,7 @@ config CPU_R5432
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
config CPU_R5500
bool "R5500"
......@@ -1618,6 +1632,7 @@ config CPU_R5500
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
help
NEC VR5500 and VR5500A series processors implement the 64-bit MIPS IV
instruction set.
......@@ -1628,6 +1643,7 @@ config CPU_NEVADA
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
select CPU_HAS_LOAD_STORE_LR
help
QED / PMC-Sierra RM52xx-series ("Nevada") processors.
......@@ -1635,6 +1651,7 @@ config CPU_R8000
bool "R8000"
depends on SYS_HAS_CPU_R8000
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_64BIT_KERNEL
help
MIPS Technologies R8000 processors. Note these processors are
......@@ -1644,6 +1661,7 @@ config CPU_R10000
bool "R10000"
depends on SYS_HAS_CPU_R10000
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1655,6 +1673,7 @@ config CPU_RM7000
bool "RM7000"
depends on SYS_HAS_CPU_RM7000
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1663,6 +1682,7 @@ config CPU_RM7000
config CPU_SB1
bool "SB1"
depends on SYS_HAS_CPU_SB1
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1673,6 +1693,7 @@ config CPU_CAVIUM_OCTEON
bool "Cavium Octeon processor"
depends on SYS_HAS_CPU_CAVIUM_OCTEON
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_64BIT_KERNEL
select WEAK_ORDERING
select CPU_SUPPORTS_HIGHMEM
......@@ -1702,6 +1723,7 @@ config CPU_BMIPS
select WEAK_ORDERING
select CPU_SUPPORTS_HIGHMEM
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_CPUFREQ
select MIPS_EXTERNAL_TIMER
help
......@@ -1710,6 +1732,7 @@ config CPU_BMIPS
config CPU_XLR
bool "Netlogic XLR SoC"
depends on SYS_HAS_CPU_XLR
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
......@@ -1728,6 +1751,7 @@ config CPU_XLP
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_MIPSR2
select CPU_SUPPORTS_HUGEPAGES
select MIPS_ASID_BITS_VARIABLE
......@@ -1833,12 +1857,14 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select ARCH_HAS_PHYS_TO_DMA
select CPU_HAS_LOAD_STORE_LR
config CPU_LOONGSON1
bool
select CPU_MIPS32
select CPU_MIPSR1
select CPU_HAS_PREFETCH
select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_CPUFREQ
......@@ -2452,6 +2478,13 @@ config XKS01
config CPU_HAS_RIXI
bool
config CPU_HAS_LOAD_STORE_LR
bool
help
CPU has support for unaligned load and store instructions:
LWL, LWR, SWL, SWR (Load/store word left/right).
LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64bit systems).
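For reference, the kind of sequence this option advertises (and that MIPS R6 cores, which dropped these instructions, cannot assemble) is an open-coded unaligned word load. This is only a hedged sketch, not code from this series; the function name and constraints are illustrative, and the lwr/lwl offsets shown are for a little-endian kernel:

    #include <linux/types.h>

    /* Illustrative only: unaligned 32-bit load using lwr/lwl (little-endian). */
    static inline u32 load_unaligned_u32(const void *p)
    {
            u32 v;

            __asm__("lwr    %0, 0(%1)\n\t"
                    "lwl    %0, 3(%1)"
                    : "=&r" (v)
                    : "r" (p)
                    : "memory");
            return v;
    }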
#
# Vectored interrupt mode is an R2 feature
#
......@@ -2899,6 +2932,9 @@ config USE_OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
config UHI_BOOT
bool
config BUILTIN_DTB
bool
......
......@@ -13,6 +13,7 @@
#
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/mips/tools elf-entry
$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
KBUILD_DEFCONFIG := 32r2el_defconfig
......@@ -230,6 +231,8 @@ toolchain-xpa := $(call cc-option-yn,$(xpa-cflags-y) -mxpa)
cflags-$(toolchain-xpa) += -DTOOLCHAIN_SUPPORTS_XPA
toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc)
cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC
toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp)
cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP
#
# Firmware support
......@@ -257,13 +260,7 @@ ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
# Sign-extend the entry point to 64 bits if retrieved as a 32-bit number.
entry-y = $(shell $(OBJDUMP) -f vmlinux 2>/dev/null \
| sed -n '/^start address / { \
s/^.* //; \
s/0x\([0-7].......\)$$/0x00000000\1/; \
s/0x\(........\)$$/0xffffffff\1/; p }')
entry-y = $(shell $(objtree)/arch/mips/tools/elf-entry vmlinux)
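The removed sed pipeline sign-extended a 32-bit start address into a 64-bit one; the new elf-entry host tool is expected to compute the same value directly from the ELF header. A rough user-space illustration of the arithmetic (not the tool's actual source):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t entry32 = 0x80100000;  /* e.g. a KSEG0 entry point */
            uint64_t entry64 = (uint64_t)(int64_t)(int32_t)entry32;

            /* prints 0xffffffff80100000 */
            printf("0x%016llx\n", (unsigned long long)entry64);
            return 0;
    }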
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
......
......@@ -5,9 +5,8 @@
#include <bcm47xx_board.h>
#include <bcm47xx.h>
static void __init bcm47xx_workarounds_netgear_wnr3500l(void)
static void __init bcm47xx_workarounds_enable_usb_power(int usb_power)
{
const int usb_power = 12;
int err;
err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
......@@ -23,7 +22,10 @@ void __init bcm47xx_workarounds(void)
switch (board) {
case BCM47XX_BOARD_NETGEAR_WNR3500L:
bcm47xx_workarounds_netgear_wnr3500l();
bcm47xx_workarounds_enable_usb_power(12);
break;
case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
bcm47xx_workarounds_enable_usb_power(21);
break;
default:
/* No workaround(s) needed */
......
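The hunk above is truncated by the context view; presumably the parameterised helper finishes the way the old WNR3500L-specific one did, along these lines (a sketch, not the verbatim patch; gpio_request_one()/gpio_free() from <linux/gpio.h>):

    static void __init bcm47xx_workarounds_enable_usb_power(int usb_power)
    {
            int err;

            err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
            if (err)
                    pr_err("Failed to request USB power gpio: %d\n", err);
            else
                    gpio_free(usb_power);
    }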
......@@ -153,8 +153,6 @@ void __init plat_time_init(void)
mips_hpt_frequency = freq;
}
extern const char __appended_dtb;
void __init plat_mem_setup(void)
{
void *dtb;
......@@ -164,15 +162,10 @@ void __init plat_mem_setup(void)
ioport_resource.start = 0;
ioport_resource.end = ~0;
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
if (!fdt_check_header(&__appended_dtb))
dtb = (void *)&__appended_dtb;
else
#endif
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
else if (fw_passed_dtb) /* UHI interface */
else if (fw_passed_dtb) /* UHI interface or appended dtb */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
......
......@@ -10,12 +10,12 @@ cpu@0 {
};
};
biu@1F800000 {
biu@1f800000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,biu", "simple-bus";
reg = <0x1F800000 0x800000>;
ranges = <0x0 0x1F800000 0x7FFFFF>;
reg = <0x1f800000 0x800000>;
ranges = <0x0 0x1f800000 0x7fffff>;
icu0: icu@80200 {
#interrupt-cells = <1>;
......@@ -24,18 +24,18 @@ icu0: icu@80200 {
reg = <0x80200 0x120>;
};
watchdog@803F0 {
watchdog@803f0 {
compatible = "lantiq,wdt";
reg = <0x803F0 0x10>;
reg = <0x803f0 0x10>;
};
};
sram@1F000000 {
sram@1f000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,sram";
reg = <0x1F000000 0x800000>;
ranges = <0x0 0x1F000000 0x7FFFFF>;
reg = <0x1f000000 0x800000>;
ranges = <0x0 0x1f000000 0x7fffff>;
eiu0: eiu@101000 {
#interrupt-cells = <1>;
......@@ -66,41 +66,41 @@ fpi@10000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,fpi", "simple-bus";
ranges = <0x0 0x10000000 0xEEFFFFF>;
reg = <0x10000000 0xEF00000>;
ranges = <0x0 0x10000000 0xeefffff>;
reg = <0x10000000 0xef00000>;
gptu@E100A00 {
gptu@e100a00 {
compatible = "lantiq,gptu-xway";
reg = <0xE100A00 0x100>;
reg = <0xe100a00 0x100>;
};
serial@E100C00 {
serial@e100c00 {
compatible = "lantiq,asc";
reg = <0xE100C00 0x400>;
reg = <0xe100c00 0x400>;
interrupt-parent = <&icu0>;
interrupts = <112 113 114>;
};
dma0: dma@E104100 {
dma0: dma@e104100 {
compatible = "lantiq,dma-xway";
reg = <0xE104100 0x800>;
reg = <0xe104100 0x800>;
};
ebu0: ebu@E105300 {
ebu0: ebu@e105300 {
compatible = "lantiq,ebu-xway";
reg = <0xE105300 0x100>;
reg = <0xe105300 0x100>;
};
pci0: pci@E105400 {
pci0: pci@e105400 {
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
compatible = "lantiq,pci-xway";
bus-range = <0x0 0x0>;
ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
0x1000000 0 0x00000000 0xae00000 0 0x200000>; /* io space */
reg = <0x7000000 0x8000 /* config space */
0xE105400 0x400>; /* pci bridge */
0xe105400 0x400>; /* pci bridge */
};
};
};
......@@ -52,14 +52,14 @@ partition@400000 {
};
};
gpio: pinmux@E100B10 {
gpio: pinmux@e100b10 {
compatible = "lantiq,danube-pinctrl";
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
#gpio-cells = <2>;
gpio-controller;
reg = <0xE100B10 0xA0>;
reg = <0xe100b10 0xa0>;
state_default: pinmux {
stp {
......@@ -82,26 +82,26 @@ conf_out {
};
};
etop@E180000 {
etop@e180000 {
compatible = "lantiq,etop-xway";
reg = <0xE180000 0x40000>;
reg = <0xe180000 0x40000>;
interrupt-parent = <&icu0>;
interrupts = <73 78>;
phy-mode = "rmii";
mac-address = [ 00 11 22 33 44 55 ];
};
stp0: stp@E100BB0 {
stp0: stp@e100bb0 {
#gpio-cells = <2>;
compatible = "lantiq,gpio-stp-xway";
gpio-controller;
reg = <0xE100BB0 0x40>;
reg = <0xe100bb0 0x40>;
lantiq,shadow = <0xfff>;
lantiq,groups = <0x3>;
};
pci@E105400 {
pci@e105400 {
lantiq,bus-clock = <33333333>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
......
dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb
dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb ocelot_pcb120.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
......@@ -78,6 +78,19 @@ uart0: serial@100000 {
status = "disabled";
};
i2c: i2c@100400 {
compatible = "mscc,ocelot-i2c", "snps,designware-i2c";
pinctrl-0 = <&i2c_pins>;
pinctrl-names = "default";
reg = <0x100400 0x100>, <0x198 0x8>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <8>;
clocks = <&ahb_clk>;
status = "disabled";
};
uart2: serial@100800 {
pinctrl-0 = <&uart2_pins>;
pinctrl-names = "default";
......@@ -182,6 +195,11 @@ gpio: pinctrl@1070034 {
interrupts = <13>;
#interrupt-cells = <2>;
i2c_pins: i2c-pins {
pins = "GPIO_16", "GPIO_17";
function = "twi";
};
uart_pins: uart-pins {
pins = "GPIO_6", "GPIO_7";
function = "uart";
......@@ -196,6 +214,7 @@ miim1: miim1 {
pins = "GPIO_14", "GPIO_15";
function = "miim1";
};
};
mdio0: mdio@107009c {
......
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright (c) 2017 Microsemi Corporation */
/dts-v1/;
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/phy/phy-ocelot-serdes.h>
#include "ocelot.dtsi"
/ {
compatible = "mscc,ocelot-pcb120", "mscc,ocelot";
chosen {
stdout-path = "serial0:115200n8";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x0e000000>;
};
};
&gpio {
phy_int_pins: phy_int_pins {
pins = "GPIO_4";
function = "gpio";
};
};
&mdio0 {
status = "okay";
};
&mdio1 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&miim1>, <&phy_int_pins>;
phy7: ethernet-phy@0 {
reg = <0>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpio>;
};
phy6: ethernet-phy@1 {
reg = <1>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpio>;
};
phy5: ethernet-phy@2 {
reg = <2>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpio>;
};
phy4: ethernet-phy@3 {
reg = <3>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpio>;
};
};
&port0 {
phy-handle = <&phy0>;
};
&port1 {
phy-handle = <&phy1>;
};
&port2 {
phy-handle = <&phy2>;
};
&port3 {
phy-handle = <&phy3>;
};
&port4 {
phy-handle = <&phy7>;
phy-mode = "sgmii";
phys = <&serdes 4 SERDES1G(2)>;
};
&port5 {
phy-handle = <&phy4>;
phy-mode = "sgmii";
phys = <&serdes 5 SERDES1G(5)>;
};
&port6 {
phy-handle = <&phy6>;
phy-mode = "sgmii";
phys = <&serdes 6 SERDES1G(3)>;
};
&port9 {
phy-handle = <&phy5>;
phy-mode = "sgmii";
phys = <&serdes 9 SERDES1G(4)>;
};
&uart0 {
status = "okay";
};
&uart2 {
status = "okay";
};
......@@ -36,6 +36,12 @@ flash@0 {
};
};
&i2c {
clock-frequency = <100000>;
i2c-sda-hold-time-ns = <300>;
status = "okay";
};
&mdio0 {
status = "okay";
};
......
......@@ -1180,8 +1180,8 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
type = IRQ_TYPE_LEVEL_LOW;
break;
default:
pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
node->name,
pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
node,
trigger);
type = IRQ_TYPE_LEVEL_LOW;
break;
......@@ -2271,8 +2271,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
parent_irq = irq_of_parse_and_map(ciu_node, 0);
if (!parent_irq) {
pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
ciu_node->name);
pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
ciu_node);
return -EINVAL;
}
......@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
addr = of_get_address(ciu_node, 0, NULL, NULL);
if (!addr) {
pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
return -EINVAL;
}
host_data->raw_reg = (u64)phys_to_virt(
......@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
addr = of_get_address(ciu_node, 1, NULL, NULL);
if (!addr) {
pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
return -EINVAL;
}
host_data->en_reg = (u64)phys_to_virt(
......@@ -2299,8 +2299,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
if (r) {
pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
ciu_node->name);
pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
ciu_node);
return r;
}
host_data->max_bits = val;
......
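The conversions above use the generic printk extension for device-tree nodes, so the code no longer dereferences node->name directly. Illustrative usage only (np is a struct device_node *):

    /* %pOF prints the full node path, %pOFn just the node name. */
    pr_info("node %pOFn at %pOF\n", np, np);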
......@@ -1161,15 +1161,12 @@ void __init device_tree_init(void)
bool do_prune;
bool fill_mac;
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
if (!fdt_check_header(&__appended_dtb)) {
fdt = &__appended_dtb;
if (fw_passed_dtb) {
fdt = (void *)fw_passed_dtb;
do_prune = false;
fill_mac = true;
pr_info("Using appended Device Tree.\n");
} else
#endif
if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
} else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
......
......@@ -15,6 +15,7 @@
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
......@@ -424,6 +425,9 @@ const struct plat_smp_ops octeon_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
......@@ -501,6 +505,9 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
void __init octeon_setup_smp(void)
......
......@@ -18,17 +18,25 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_GPIO_SYSFS=y
CONFIG_NETDEVICES=y
CONFIG_MSCC_OCELOT_SWITCH=y
CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
CONFIG_MDIO_MSCC_MIIM=y
CONFIG_MICROSEMI_PHY=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
CONFIG_SPI_DESIGNWARE=y
CONFIG_SPI_DW_MMIO=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_OCELOT_RESET=y
......
......@@ -65,11 +65,11 @@ config FIT_IMAGE_FDT_XILFPGA
Enable this to include the FDT for the MIPSfpga platform
from Imagination Technologies in the FIT kernel image.
config FIT_IMAGE_FDT_OCELOT_PCB123
bool "Include FDT for Microsemi Ocelot PCB123"
config FIT_IMAGE_FDT_OCELOT
bool "Include FDT for Microsemi Ocelot development platforms"
select MSCC_OCELOT
help
Enable this to include the FDT for the Ocelot PCB123 platform
Enable this to include the FDT for the Ocelot development platforms
from Microsemi in the FIT kernel image.
This requires u-boot on the platform.
......
......@@ -15,5 +15,4 @@ obj-y += proc.o
obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o
obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
obj-$(CONFIG_LEGACY_BOARD_OCELOT) += board-ocelot.o
obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_VIRT_BOARD_RANCHU) += board-ranchu.o
......@@ -16,5 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
its-$(CONFIG_FIT_IMAGE_FDT_OCELOT) += board-ocelot.its.S
its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
......@@ -11,6 +11,17 @@
algo = "sha1";
};
};
fdt@ocelot_pcb120 {
description = "MSCC Ocelot PCB120 Device Tree";
data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
hash@0 {
algo = "sha1";
};
};
};
configurations {
......@@ -19,5 +30,11 @@
kernel = "kernel@0";
fdt = "fdt@ocelot_pcb123";
};
conf@ocelot_pcb120 {
description = "Ocelot Linux kernel";
kernel = "kernel@0";
fdt = "fdt@ocelot_pcb120";
};
};
};
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/uaccess.h>
static int generic_kexec_prepare(struct kimage *image)
{
int i;
for (i = 0; i < image->nr_segments; i++) {
struct fdt_header fdt;
if (image->segment[i].memsz <= sizeof(fdt))
continue;
if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
continue;
if (fdt_check_header(&fdt))
continue;
kexec_args[0] = -2;
kexec_args[1] = (unsigned long)
phys_to_virt((unsigned long)image->segment[i].mem);
break;
}
return 0;
}
static int __init register_generic_kexec(void)
{
_machine_kexec_prepare = generic_kexec_prepare;
return 0;
}
arch_initcall(register_generic_kexec);
......@@ -15,6 +15,7 @@
/* Kernel variants */
#define kernel_cache(op, base) "cache " op ", " base "\n"
#define kernel_pref(hint, base) "pref " hint ", " base "\n"
#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
......@@ -51,6 +52,7 @@
" .set pop\n"
#define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base)
#define user_pref(hint, base) __BUILD_EVA_INSN("prefe", hint, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr)
......@@ -72,6 +74,7 @@
#else
#define user_cache(op, base) kernel_cache(op, base)
#define user_pref(hint, base) kernel_pref(hint, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
......@@ -99,6 +102,7 @@
#else /* __ASSEMBLY__ */
#define kernel_cache(op, base) cache op, base
#define kernel_pref(hint, base) pref hint, base
#define kernel_ll(reg, addr) ll reg, addr
#define kernel_sc(reg, addr) sc reg, addr
#define kernel_lw(reg, addr) lw reg, addr
......@@ -135,6 +139,7 @@
.set pop;
#define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base)
#define user_pref(hint, base) __BUILD_EVA_INSN(prefe, hint, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr)
......@@ -155,6 +160,7 @@
#else
#define user_cache(op, base) kernel_cache(op, base)
#define user_pref(hint, base) kernel_pref(hint, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
......
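The new kernel_ll/kernel_sc and user_ll/user_sc helpers mirror the existing cache/pref/lw wrappers so LL/SC sequences can pick the EVA variants automatically. A minimal, hedged sketch of how the C-string forms might be used from inline assembly (simplified: no SMP barriers or R10000 workarounds; the function name is invented):

    #include <linux/types.h>
    #include <asm/asm-eva.h>

    static inline void ll_sc_increment(volatile u32 *p)
    {
            u32 tmp;

            __asm__ __volatile__(
            "1:     " kernel_ll("%0", "%1")
            "       addiu   %0, %0, 1       \n"
            "       " kernel_sc("%0", "%1")
            "       beqz    %0, 1b          \n"
            : "=&r" (tmp), "+R" (*p)
            :
            : "memory");
    }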
......@@ -20,32 +20,6 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
#ifndef CAT
#ifdef __STDC__
#define __CAT(str1, str2) str1##str2
#else
#define __CAT(str1, str2) str1/**/str2
#endif
#define CAT(str1, str2) __CAT(str1, str2)
#endif
/*
* PIC specific declarations
* Not used for the kernel but here seems to be the right place.
*/
#ifdef __PIC__
#define CPRESTORE(register) \
.cprestore register
#define CPADD(register) \
.cpadd register
#define CPLOAD(register) \
.cpload register
#else
#define CPRESTORE(register)
#define CPADD(register)
#define CPLOAD(register)
#endif
/*
* LEAF - declare leaf routine
*/
......@@ -129,96 +103,6 @@ symbol = value
8: .asciiz msg; \
.popsection;
/*
* Build text tables
*/
#define TTABLE(string) \
.pushsection .text; \
.word 1f; \
.popsection \
.pushsection .data; \
1: .asciiz string; \
.popsection
/*
* MIPS IV pref instruction.
* Use with .set noreorder only!
*
* MIPS IV implementations are free to treat this as a nop. The R5000
* is one of them. So we should have an option not to use this instruction.
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
#define PREF(hint,addr) \
.set push; \
.set arch=r5000; \
pref hint, addr; \
.set pop
#define PREFE(hint, addr) \
.set push; \
.set mips0; \
.set eva; \
prefe hint, addr; \
.set pop
#define PREFX(hint,addr) \
.set push; \
.set arch=r5000; \
prefx hint, addr; \
.set pop
#else /* !CONFIG_CPU_HAS_PREFETCH */
#define PREF(hint, addr)
#define PREFE(hint, addr)
#define PREFX(hint, addr)
#endif /* !CONFIG_CPU_HAS_PREFETCH */
/*
* MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
*/
#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
#define MOVN(rd, rs, rt) \
.set push; \
.set reorder; \
beqz rt, 9f; \
move rd, rs; \
.set pop; \
9:
#define MOVZ(rd, rs, rt) \
.set push; \
.set reorder; \
bnez rt, 9f; \
move rd, rs; \
.set pop; \
9:
#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
#define MOVN(rd, rs, rt) \
.set push; \
.set noreorder; \
bnezl rt, 9f; \
move rd, rs; \
.set pop; \
9:
#define MOVZ(rd, rs, rt) \
.set push; \
.set noreorder; \
beqzl rt, 9f; \
move rd, rs; \
.set pop; \
9:
#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
(_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
#define MOVN(rd, rs, rt) \
movn rd, rs, rt
#define MOVZ(rd, rs, rt) \
movz rd, rs, rt
#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
/*
* Stack alignment
*/
......
......@@ -20,6 +20,7 @@
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
......@@ -33,11 +34,6 @@
#include <ioremap.h>
#include <mangle-port.h>
/*
* Slowdown I/O port space accesses for antique hardware.
*/
#undef CONF_SLOWDOWN_IO
/*
* Raw operations are never swapped in software. OTOH values that raw
* operations are working on may or may not have been swapped by the bus
......@@ -50,6 +46,11 @@
# define __raw_ioswabq(a, x) (x)
# define ____raw_ioswabq(a, x) (x)
# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
#define IO_SPACE_LIMIT 0xffff
......@@ -80,31 +81,29 @@ static inline void set_io_port_base(unsigned long base)
}
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*
* Provide the necessary definitions for generic iomap. We make use of
* mips_io_port_base for iomap(), but we don't reserve any low addresses for
* use with I/O ports.
*/
#define __SLOW_DOWN_IO \
__asm__ __volatile__( \
"sb\t$0,0x80(%0)" \
: : "r" (mips_io_port_base));
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET mips_io_port_base
#define PIO_MASK IO_SPACE_LIMIT
#define PIO_RESERVED 0x0UL
#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif
/*
* Enforce in-order execution of data I/O. In the MIPS architecture
* these are equivalent to corresponding platform-specific memory
* barriers defined in <asm/barrier.h>. API pinched from PowerPC,
* with sync additionally defined.
*/
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
/* Some callers use this older API instead. */
#define mmiowb() iobarrier_w()
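These barriers replace the ad-hoc definitions removed further down (the Octeon-only mmiowb and the CONF_SLOWDOWN_IO machinery). The classic mmiowb() pattern they serve looks like this (hypothetical driver fragment; dev, regs and REG_CTRL are invented names):

    spin_lock(&dev->lock);
    writel(val, dev->regs + REG_CTRL);
    mmiowb();               /* keep the MMIO write ordered before the unlock */
    spin_unlock(&dev->lock);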
/*
* virt_to_phys - map virtual addresses to physical
......@@ -172,11 +171,6 @@ static inline void *isa_bus_to_virt(unsigned long address)
extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
#ifndef CONFIG_PCI
struct pci_dev;
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
#endif
static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
......@@ -316,13 +310,13 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
\
static inline void pfx##write##bwlq(type val, \
volatile void __iomem *mem) \
......@@ -330,6 +324,9 @@ static inline void pfx##write##bwlq(type val, \
volatile type *__mem; \
type __val; \
\
if (barrier) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
......@@ -367,6 +364,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
if (barrier) \
iobarrier_rw(); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
......@@ -390,17 +390,21 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
} \
\
/* prevent prefetching of coherent DMA data prematurely */ \
if (!relax) \
rmb(); \
return pfx##ioswab##bwlq(__mem, __val); \
}
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
\
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
if (barrier) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
......@@ -411,7 +415,6 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
*__addr = __val; \
slow; \
} \
\
static inline type pfx##in##bwlq##p(unsigned long port) \
......@@ -423,23 +426,27 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
\
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
if (barrier) \
iobarrier_rw(); \
\
__val = *__addr; \
slow; \
\
/* prevent prefetching of coherent DMA data prematurely */ \
if (!relax) \
rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
#define BUILDIO_MEM(bwlq, type) \
\
__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
__BUILD_MEMORY_PFX(, bwlq, type) \
__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
__BUILD_MEMORY_PFX(, bwlq, type, 0)
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
......@@ -447,8 +454,8 @@ BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
......@@ -463,19 +470,19 @@ BUILDIO_IOPORT(q, u64)
#define __BUILDIO(bwlq, type) \
\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
__BUILDIO(q, u64)
#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#define readq_relaxed readq
#define readb_relaxed __relaxed_readb
#define readw_relaxed __relaxed_readw
#define readl_relaxed __relaxed_readl
#define readq_relaxed __relaxed_readq
#define writeb_relaxed writeb
#define writew_relaxed writew
#define writel_relaxed writel
#define writeq_relaxed writeq
#define writeb_relaxed __relaxed_writeb
#define writew_relaxed __relaxed_writew
#define writel_relaxed __relaxed_writel
#define writeq_relaxed __relaxed_writeq
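With this change the _relaxed accessors finally map to genuinely relaxed implementations instead of aliasing the fully ordered ones. A hedged example of where that matters (register and bit names are invented; udelay() and the error codes come from <linux/delay.h> and <linux/errno.h>):

    static int wait_for_idle(void __iomem *regs)
    {
            int timeout = 1000;

            /* No ordering against DMA memory is needed while polling. */
            while (readl_relaxed(regs + REG_STATUS) & STATUS_BUSY) {
                    if (!--timeout)
                            return -ETIMEDOUT;
                    udelay(1);
            }

            /* One fully ordered read before touching data the device DMA'd. */
            return (readl(regs + REG_STATUS) & STATUS_ERROR) ? -EIO : 0;
    }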
#define readb_be(addr) \
__raw_readb((__force unsigned *)(addr))
......@@ -561,14 +568,6 @@ BUILDSTRING(l, u32)
BUILDSTRING(q, u64)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
memset((void __force *) addr, val, count);
......
......@@ -12,11 +12,11 @@
#include <asm/stacktrace.h>
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve 3*4096 bytes for board-specific info */
#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
......@@ -39,11 +39,12 @@ extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
extern void (*_machine_kexec_shutdown)(void);
extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
void default_machine_crash_shutdown(struct pt_regs *regs);
void kexec_nonboot_cpu_jump(void);
void kexec_reboot(void);
#ifdef CONFIG_SMP
extern const unsigned char kexec_smp_wait[];
extern unsigned long secondary_kexec_args[4];
extern void (*relocated_kexec_smp_wait) (void *);
extern atomic_t kexec_ready_to_reboot;
extern void (*_crash_smp_send_stop)(void);
#endif
......
......@@ -10,7 +10,7 @@
#define MIPS_CPU_IRQ_BASE 56
#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
......
......@@ -11,6 +11,8 @@
#ifndef __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
#define __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
#include <asm/cpu.h>
/*
* Override macros used in arch/mips/kernel/head.S.
*/
......@@ -26,12 +28,15 @@
mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
#endif
1:
_ehb
.set pop
#endif
......@@ -52,12 +57,15 @@
mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
mtc0 t0, CP0_PAGEGRAIN
#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
#endif
1:
_ehb
.set pop
#endif
......
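The added assembly gates the STFill-buffer enable on the core revision rather than on CONFIG_LOONGSON3_ENHANCEMENT. In C terms the check amounts to roughly the following (a sketch; it assumes the usual read_c0_prid()/Config6 accessors and that bit 8 of Config6 is the store-fill enable, as in the assembly above):

    if ((read_c0_prid() & (PRID_IMP_MASK | PRID_REV_MASK)) >=
        (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2))
            write_c0_config6(read_c0_config6() | 0x100); /* enable STFill buffer */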
......@@ -2287,13 +2287,14 @@ do { \
_write_32bit_cp1_register(dest, val, )
#endif
#ifdef HAVE_AS_DSP
#ifdef TOOLCHAIN_SUPPORTS_DSP
#define rddsp(mask) \
({ \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" rddsp %0, %x1 \n" \
" .set pop \n" \
......@@ -2306,6 +2307,7 @@ do { \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" wrdsp %0, %x1 \n" \
" .set pop \n" \
......@@ -2318,6 +2320,7 @@ do { \
long mflo0; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac0 \n" \
" .set pop \n" \
......@@ -2330,6 +2333,7 @@ do { \
long mflo1; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac1 \n" \
" .set pop \n" \
......@@ -2342,6 +2346,7 @@ do { \
long mflo2; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac2 \n" \
" .set pop \n" \
......@@ -2354,6 +2359,7 @@ do { \
long mflo3; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac3 \n" \
" .set pop \n" \
......@@ -2366,6 +2372,7 @@ do { \
long mfhi0; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac0 \n" \
" .set pop \n" \
......@@ -2378,6 +2385,7 @@ do { \
long mfhi1; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac1 \n" \
" .set pop \n" \
......@@ -2390,6 +2398,7 @@ do { \
long mfhi2; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac2 \n" \
" .set pop \n" \
......@@ -2402,6 +2411,7 @@ do { \
long mfhi3; \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac3 \n" \
" .set pop \n" \
......@@ -2414,6 +2424,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac0 \n" \
" .set pop \n" \
......@@ -2425,6 +2436,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac1 \n" \
" .set pop \n" \
......@@ -2436,6 +2448,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac2 \n" \
" .set pop \n" \
......@@ -2447,6 +2460,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac3 \n" \
" .set pop \n" \
......@@ -2458,6 +2472,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac0 \n" \
" .set pop \n" \
......@@ -2469,6 +2484,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac1 \n" \
" .set pop \n" \
......@@ -2480,6 +2496,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac2 \n" \
" .set pop \n" \
......@@ -2491,6 +2508,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac3 \n" \
" .set pop \n" \
......
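With the assembler probed once in the Makefile (TOOLCHAIN_SUPPORTS_DSP) instead of per-object HAVE_AS_DSP flags, these accessors can be used from any file, still guarded by the run-time CPU check. Illustrative use only (DSP_MASK from <asm/dsp.h>; the surrounding context is assumed):

    if (cpu_has_dsp)
            current->thread.dsp.dspcontrol = rddsp(DSP_MASK);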
......@@ -48,58 +48,14 @@ extern void (*r4k_blast_icache)(void);
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT
#define __iflush_prologue \
unsigned long redundance; \
extern int mt_n_iflushes; \
for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
#define __iflush_epilogue \
}
#define __dflush_prologue \
unsigned long redundance; \
extern int mt_n_dflushes; \
for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
#define __dflush_epilogue \
}
#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue
#else /* CONFIG_MIPS_MT */
#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }
#endif /* CONFIG_MIPS_MT */
static inline void flush_icache_line_indexed(unsigned long addr)
{
__iflush_prologue
cache_op(Index_Invalidate_I, addr);
__iflush_epilogue
}
static inline void flush_dcache_line_indexed(unsigned long addr)
{
__dflush_prologue
cache_op(Index_Writeback_Inv_D, addr);
__dflush_epilogue
}
static inline void flush_scache_line_indexed(unsigned long addr)
......@@ -109,7 +65,6 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
__iflush_prologue
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
cache_op(Hit_Invalidate_I_Loongson2, addr);
......@@ -119,21 +74,16 @@ static inline void flush_icache_line(unsigned long addr)
cache_op(Hit_Invalidate_I, addr);
break;
}
__iflush_epilogue
}
static inline void flush_dcache_line(unsigned long addr)
{
__dflush_prologue
cache_op(Hit_Writeback_Inv_D, addr);
__dflush_epilogue
}
static inline void invalidate_dcache_line(unsigned long addr)
{
__dflush_prologue
cache_op(Hit_Invalidate_D, addr);
__dflush_epilogue
}
static inline void invalidate_scache_line(unsigned long addr)
......@@ -586,13 +536,9 @@ static inline void extra##blast_##pfx##cache##lsize(void) \
current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
cache##lsize##_unroll32(addr|ws, indexop); \
\
__##pfx##flush_epilogue \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
......@@ -600,14 +546,10 @@ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
\
__##pfx##flush_prologue \
\
do { \
cache##lsize##_unroll32(start, hitop); \
start += lsize * 32; \
} while (start < end); \
\
__##pfx##flush_epilogue \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
......@@ -620,13 +562,9 @@ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long
current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
cache##lsize##_unroll32(addr|ws, indexop); \
\
__##pfx##flush_epilogue \
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
......@@ -656,14 +594,10 @@ static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
\
__##pfx##flush_prologue \
\
do { \
cache##lsize##_unroll32_user(start, hitop); \
start += lsize * 32; \
} while (start < end); \
\
__##pfx##flush_epilogue \
}
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
......@@ -685,16 +619,12 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
unsigned long addr = start & ~(lsize - 1); \
unsigned long aend = (end - 1) & ~(lsize - 1); \
\
__##pfx##flush_prologue \
\
while (1) { \
prot##cache_op(hitop, addr); \
if (addr == aend) \
break; \
addr += lsize; \
} \
\
__##pfx##flush_epilogue \
}
#ifndef CONFIG_EVA
......@@ -712,8 +642,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
unsigned long addr = start & ~(lsize - 1); \
unsigned long aend = (end - 1) & ~(lsize - 1); \
\
__##pfx##flush_prologue \
\
if (!uaccess_kernel()) { \
while (1) { \
protected_cachee_op(hitop, addr); \
......@@ -730,7 +658,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
} \
\
} \
__##pfx##flush_epilogue \
}
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
......
......@@ -33,6 +33,9 @@ struct plat_smp_ops {
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
#endif
#ifdef CONFIG_KEXEC
void (*kexec_nonboot_cpu)(void);
#endif
};
extern void register_smp_ops(const struct plat_smp_ops *ops);
......
......@@ -91,6 +91,22 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
#ifdef CONFIG_KEXEC
static inline void kexec_nonboot_cpu(void)
{
extern const struct plat_smp_ops *mp_ops; /* private */
return mp_ops->kexec_nonboot_cpu();
}
static inline void *kexec_nonboot_cpu_func(void)
{
extern const struct plat_smp_ops *mp_ops; /* private */
return mp_ops->kexec_nonboot_cpu;
}
#endif
/*
* This function will set up the necessary IPIs for Linux to communicate
* with the CPUs in mask.
......
......@@ -113,22 +113,4 @@ obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
obj-$(CONFIG_CPU_PM) += pm.o
obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
#
# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
# here because the compiler may use DSP ASE instructions (such as lwx) in
# code paths where we cannot check that the CPU we are running on supports it.
# Proper abstraction using HAVE_AS_DSP and macros is done in
# arch/mips/include/asm/mipsregs.h.
#
ifeq ($(CONFIG_CPU_MIPSR2), y)
CFLAGS_DSP = -DHAVE_AS_DSP
CFLAGS_signal.o = $(CFLAGS_DSP)
CFLAGS_signal32.o = $(CFLAGS_DSP)
CFLAGS_process.o = $(CFLAGS_DSP)
CFLAGS_branch.o = $(CFLAGS_DSP)
CFLAGS_ptrace.o = $(CFLAGS_DSP)
endif
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
......@@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs)
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
......@@ -43,7 +46,9 @@ static void crash_shutdown_secondary(void *passed_regs)
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
relocated_kexec_smp_wait(NULL);
kexec_reboot();
/* NOTREACHED */
}
......
......@@ -77,7 +77,7 @@ EXPORT(_stext)
*/
FEXPORT(__kernel_entry)
j kernel_entry
#endif
#endif /* CONFIG_BOOT_RAW */
__REF
......@@ -94,24 +94,26 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
0:
#ifdef CONFIG_USE_OF
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
defined(CONFIG_MIPS_ELF_APPENDED_DTB)
PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
#else
#else /* !CONFIG_CPU_BIG_ENDIAN */
li t1, 0xedfe0dd0
#endif
#endif /* !CONFIG_CPU_BIG_ENDIAN */
lw t0, (t2)
beq t0, t1, dtb_found
#endif
#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
li t1, -2
move t2, a1
beq a0, t1, dtb_found
li t2, 0
dtb_found:
#endif
#endif /* CONFIG_USE_OF */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
......@@ -156,9 +158,9 @@ dtb_found:
* newly sync'd icache.
*/
jr.hb v0
#else
#else /* !CONFIG_RELOCATABLE */
j start_kernel
#endif
#endif /* !CONFIG_RELOCATABLE */
END(kernel_entry)
#ifdef CONFIG_SMP
......
......@@ -9,6 +9,7 @@
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
......@@ -19,15 +20,18 @@ extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
static unsigned long reboot_code_buffer;
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait) (void *);
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
......@@ -48,13 +52,59 @@ static void kexec_image_info(const struct kimage *kimage)
}
}
#ifdef CONFIG_UHI_BOOT
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
int i;
/*
* In case DTB file is not passed to the new kernel, a flat device
* tree will be created by kexec tool. It holds modified command
* line for the new kernel.
*/
for (i = 0; i < kimage->nr_segments; i++) {
struct fdt_header fdt;
if (kimage->segment[i].memsz <= sizeof(fdt))
continue;
if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
continue;
if (fdt_check_header(&fdt))
continue;
kexec_args[0] = -2;
kexec_args[1] = (unsigned long)
phys_to_virt((unsigned long)kimage->segment[i].mem);
break;
}
return 0;
}
int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
#else
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
#endif /* CONFIG_UHI_BOOT */
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
if (!kexec_nonboot_cpu_func())
return -EINVAL;
#endif
kexec_image_info(kimage);
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
return 0;
}
......@@ -63,11 +113,41 @@ machine_kexec_cleanup(struct kimage *kimage)
{
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *param)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
/* NOTREACHED */
}
#endif
void
machine_shutdown(void)
{
if (_machine_kexec_shutdown)
_machine_kexec_shutdown();
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
while (num_online_cpus() > 1) {
cpu_relax();
mdelay(1);
}
#endif
}
void
......@@ -79,12 +159,57 @@ machine_crash_shutdown(struct pt_regs *regs)
default_machine_crash_shutdown(regs);
}
typedef void (*noretfun_t)(void) __noreturn;
#ifdef CONFIG_SMP
void kexec_nonboot_cpu_jump(void)
{
local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
reboot_code_buffer + relocate_new_kernel_size);
relocated_kexec_smp_wait(NULL);
}
#endif
void kexec_reboot(void)
{
void (*do_kexec)(void) __noreturn;
/*
* We know we were online, and there will be no incoming IPIs at
* this point. Mark online again before rebooting so that the crash
* analysis tool will see us correctly.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
#ifdef CONFIG_SMP
if (smp_processor_id() > 0) {
/*
* Instead of cpu_relax() or wait, this is needed for kexec
* smp reboot. Kdump usually doesn't require an smp new
* kernel, but kexec may do.
*/
kexec_nonboot_cpu();
/* NOTREACHED */
}
#endif
/*
* Make sure we get correct instructions written by the
* machine_kexec() CPU.
*/
local_flush_icache_range(reboot_code_buffer,
reboot_code_buffer + relocate_new_kernel_size);
do_kexec = (void *)reboot_code_buffer;
do_kexec();
}
void
machine_kexec(struct kimage *image)
{
unsigned long reboot_code_buffer;
unsigned long entry;
unsigned long *ptr;
......@@ -118,6 +243,9 @@ machine_kexec(struct kimage *image)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline BEFORE disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/*
* we do not want to be bothered.
*/
......@@ -125,6 +253,7 @@ machine_kexec(struct kimage *image)
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
......@@ -133,5 +262,5 @@ machine_kexec(struct kimage *image)
smp_wmb();
atomic_set(&kexec_ready_to_reboot, 1);
#endif
((noretfun_t) reboot_code_buffer)();
kexec_reboot();
}
......@@ -154,40 +154,6 @@ static int __init config7_set(char *str)
}
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
static int __init set_protiflush(char *s)
{
mt_protiflush = 1;
return 1;
}
__setup("protiflush", set_protiflush);
static int __init set_protdflush(char *s)
{
mt_protdflush = 1;
return 1;
}
__setup("protdflush", set_protdflush);
static int __init niflush(char *s)
{
get_option(&s, &mt_n_iflushes);
return 1;
}
__setup("niflush=", niflush);
static int __init ndflush(char *s)
{
get_option(&s, &mt_n_dflushes);
return 1;
}
__setup("ndflush=", ndflush);
static unsigned int itc_base;
static int __init set_itc_base(char *str)
......@@ -232,16 +198,6 @@ void mips_mt_set_cpuoptions(void)
printk("Config7: 0x%08x\n", read_c0_config7());
}
/* Report Cache management debug options */
if (mt_protiflush)
printk("I-cache flushes single-threaded\n");
if (mt_protdflush)
printk("D-cache flushes single-threaded\n");
if (mt_n_iflushes != 1)
printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
......@@ -283,21 +239,6 @@ void mips_mt_set_cpuoptions(void)
}
}
/*
* Function to protect cache flushes from concurrent execution
* depends on MP software model chosen.
*/
void mt_cflush_lockdown(void)
{
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
struct class *mt_class;
static int __init mt_init(void)
......
......@@ -146,7 +146,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
break;
type = (*r >> 24) & 0xff;
loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
loc_new = RELOCATED(loc_orig);
if (reloc_handlers_rel[type] == NULL) {
......
......@@ -333,7 +333,7 @@ static void __init finalize_initrd(void)
maybe_bswap_initrd();
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
memblock_reserve(__pa(initrd_start), size);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
......@@ -370,20 +370,10 @@ static void __init bootmem_init(void)
#else /* !CONFIG_SGI_IP27 */
static unsigned long __init bootmap_bytes(unsigned long pages)
{
unsigned long bytes = DIV_ROUND_UP(pages, 8);
return ALIGN(bytes, sizeof(long));
}
static void __init bootmem_init(void)
{
unsigned long reserved_end;
unsigned long mapstart = ~0UL;
unsigned long bootmap_size;
phys_addr_t ramstart = PHYS_ADDR_MAX;
bool bootmap_valid = false;
int i;
/*
......@@ -395,6 +385,8 @@ static void __init bootmem_init(void)
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
/*
* max_low_pfn is not a number of pages. The number of pages
* of the system is given by 'max_low_pfn - min_low_pfn'.
......@@ -442,9 +434,6 @@ static void __init bootmem_init(void)
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
continue;
#endif
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
}
if (min_low_pfn >= max_low_pfn)
......@@ -456,9 +445,11 @@ static void __init bootmem_init(void)
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
if (ramstart > PHYS_OFFSET)
if (ramstart > PHYS_OFFSET) {
add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
BOOT_MEM_RESERVED);
memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
}
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
......@@ -483,52 +474,6 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
#ifdef CONFIG_BLK_DEV_INITRD
/*
* mapstart should be after initrd_end
*/
if (initrd_end)
mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif
/*
* check that mapstart doesn't overlap with any of
* memory regions that have been reserved through eg. DTB
*/
bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
bootmap_size);
for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
unsigned long mapstart_addr;
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RESERVED:
mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
boot_mem_map.map[i].size);
if (PHYS_PFN(mapstart_addr) < mapstart)
break;
bootmap_valid = memory_region_available(mapstart_addr,
bootmap_size);
if (bootmap_valid)
mapstart = PHYS_PFN(mapstart_addr);
break;
default:
break;
}
}
if (!bootmap_valid)
panic("No memory area to place a bootmap bitmap");
/*
* Initialize the boot-time allocator with low memory only.
*/
if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
min_low_pfn, max_low_pfn))
panic("Unexpected memory size required for bootmap");
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
......@@ -577,9 +522,9 @@ static void __init bootmem_init(void)
default:
/* Not usable memory */
if (start > min_low_pfn && end < max_low_pfn)
reserve_bootmem(boot_mem_map.map[i].addr,
boot_mem_map.map[i].size,
BOOTMEM_DEFAULT);
memblock_reserve(boot_mem_map.map[i].addr,
boot_mem_map.map[i].size);
continue;
}
......@@ -602,15 +547,9 @@ static void __init bootmem_init(void)
size = end - start;
/* Register lowmem ranges */
free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
memory_present(0, start, end);
}
/*
* Reserve the bootmap memory.
*/
reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
#ifdef CONFIG_RELOCATABLE
/*
* The kernel reserves all memory below its _end symbol as bootmem,
......@@ -642,29 +581,6 @@ static void __init bootmem_init(void)
#endif /* CONFIG_SGI_IP27 */
/*
* arch_mem_init - initialize memory management subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using add_memory_region.
*
* At this stage the memory configuration of the system is known to the
* kernel but generic memory management system is still entirely uninitialized.
*
* o bootmem_init()
* o sparse_init()
* o paging_init()
* o dma_contiguous_reserve()
*
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
* This was rather impractical because it meant plat_mem_setup had to
* get by without any kind of memory allocator. To keep old code from
* breaking, plat_setup was just renamed to plat_mem_setup and a second
* platform initialization hook for anything else was introduced.
*/
static int usermem __initdata;
static int __init early_parse_mem(char *p)
......@@ -841,6 +757,28 @@ static void __init request_crashkernel(struct resource *res)
#define BUILTIN_EXTEND_WITH_PROM \
IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
/*
* arch_mem_init - initialize memory management subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using add_memory_region.
*
* At this stage the memory configuration of the system is known to the
* kernel but generic memory management system is still entirely uninitialized.
*
* o bootmem_init()
* o sparse_init()
* o paging_init()
* o dma_contiguous_reserve()
*
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
* This was rather impractical because it meant plat_mem_setup had to
* get by without any kind of memory allocator. To keep old code from
* breaking, plat_setup was just renamed to plat_mem_setup and a second
* platform initialization hook for anything else was introduced.
*/
static void __init arch_mem_init(char **cmdline_p)
{
struct memblock_region *reg;
......@@ -916,21 +854,29 @@ static void __init arch_mem_init(char **cmdline_p)
early_init_fdt_scan_reserved_mem();
bootmem_init();
/*
* Prevent memblock from allocating high memory.
* This cannot be done before max_low_pfn is detected, so up
* to this point it is only possible to reserve physical memory
* with memblock_reserve; memblock_virt_alloc* can be used
* only after this point.
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
#ifdef CONFIG_PROC_VMCORE
if (setup_elfcorehdr && setup_elfcorehdr_size) {
printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
setup_elfcorehdr, setup_elfcorehdr_size);
reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
BOOTMEM_DEFAULT);
memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
}
#endif
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
reserve_bootmem(crashk_res.start,
crashk_res.end - crashk_res.start + 1,
BOOTMEM_DEFAULT);
memblock_reserve(crashk_res.start,
crashk_res.end - crashk_res.start + 1);
#endif
device_tree_init();
sparse_init();
......@@ -940,7 +886,7 @@ static void __init arch_mem_init(char **cmdline_p)
/* Tell bootmem about cma reserved memblock section */
for_each_memblock(reserved, reg)
if (reg->size != 0)
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
memblock_reserve(reg->base, reg->size);
reserve_bootmem_region(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
......
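The bootmem_init()/arch_mem_init() hunks above replace the old bootmem calls (init_bootmem_node, reserve_bootmem, free_bootmem) with memblock, and the new comment spells out the resulting ordering rule: reservations may happen at any time, but allocations are only legal once max_low_pfn is known and the limit has been set. A minimal sketch of that ordering, assuming the memblock API of this kernel generation; the reserved range and function name are made up for illustration.
/* Hedged sketch of the ordering constraint; not code from the patch. */
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>	/* memblock_virt_alloc() at this point in time */

static void __init memblock_ordering_sketch(void)
{
	void *buf;

	/* Early on, only reservations are safe. */
	memblock_reserve(PHYS_OFFSET, PAGE_SIZE);

	/* ... bootmem_init() runs and establishes max_low_pfn ... */

	/* Cap allocations to lowmem; from here on allocating is fine. */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	buf = memblock_virt_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
	(void)buf;
}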
......@@ -25,6 +25,7 @@
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/time.h>
#include <asm/pgtable.h>
......@@ -423,6 +424,9 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
const struct plat_smp_ops bmips5000_smp_ops = {
......@@ -437,6 +441,9 @@ const struct plat_smp_ops bmips5000_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
#endif /* CONFIG_SMP */
......
......@@ -398,6 +398,55 @@ static void cps_smp_finish(void)
local_irq_enable();
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
enum cpu_death {
CPU_DEATH_HALT,
CPU_DEATH_POWER,
};
static void cps_shutdown_this_cpu(enum cpu_death death)
{
unsigned int cpu, core, vpe_id;
cpu = smp_processor_id();
core = cpu_core(&cpu_data[cpu]);
if (death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
pr_debug("Halting core %d VP%d\n", core, vpe_id);
if (cpu_has_mipsmt) {
/* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} else if (cpu_has_vp) {
write_cpc_cl_vp_stop(1 << vpe_id);
/* Ensure that the VP_STOP register is written */
wmb();
}
} else {
pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
}
#ifdef CONFIG_KEXEC
static void cps_kexec_nonboot_cpu(void)
{
if (cpu_has_mipsmt || cpu_has_vp)
cps_shutdown_this_cpu(CPU_DEATH_HALT);
else
cps_shutdown_this_cpu(CPU_DEATH_POWER);
}
#endif /* CONFIG_KEXEC */
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
#ifdef CONFIG_HOTPLUG_CPU
static int cps_cpu_disable(void)
......@@ -421,19 +470,15 @@ static int cps_cpu_disable(void)
}
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
CPU_DEATH_POWER,
} cpu_death;
static enum cpu_death cpu_death;
void play_dead(void)
{
unsigned int cpu, core, vpe_id;
unsigned int cpu;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
core = cpu_core(&cpu_data[cpu]);
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
......@@ -456,25 +501,7 @@ void play_dead(void)
/* This CPU has chosen its way out */
(void)cpu_report_death();
if (cpu_death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
pr_debug("Halting core %d VP%d\n", core, vpe_id);
if (cpu_has_mipsmt) {
/* Halt this TC */
write_c0_tchalt(TCHALT_H);
instruction_hazard();
} else if (cpu_has_vp) {
write_cpc_cl_vp_stop(1 << vpe_id);
/* Ensure that the VP_STOP register is written */
wmb();
}
} else {
pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
cps_shutdown_this_cpu(cpu_death);
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
......@@ -593,6 +620,9 @@ static const struct plat_smp_ops cps_smp_ops = {
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
#endif
};
bool mips_cps_smp_in_use(void)
......
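Both the BMIPS and CPS hunks above add a kexec_nonboot_cpu member to struct plat_smp_ops, so each platform can choose how a non-boot CPU steps aside before kexec. The wrapper below is purely illustrative: the member and the kexec_nonboot_cpu_jump() helper are real names from this series, but the dispatch function itself is invented.
/* Illustrative dispatch only: call whichever shutdown method the platform
 * registered, e.g. cps_kexec_nonboot_cpu() from the hunk above. */
static void kexec_park_this_cpu_sketch(const struct plat_smp_ops *ops)
{
	if (ops->kexec_nonboot_cpu)
		ops->kexec_nonboot_cpu();
	else
		kexec_nonboot_cpu_jump();	/* helper several platforms above register directly */
}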
......@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
......@@ -348,7 +349,7 @@ static void __show_regs(const struct pt_regs *regs)
*/
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
__show_regs(regs);
dump_stack();
}
......@@ -2260,8 +2261,10 @@ void __init trap_init(void)
unsigned long size = 0x200 + VECTORSPACING*64;
phys_addr_t ebase_pa;
memblock_set_bottom_up(true);
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
memblock_set_bottom_up(false);
/*
* Try to ensure ebase resides in KSeg0 if possible.
......
......@@ -130,7 +130,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -151,8 +151,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has no lwl instruction */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -186,7 +186,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do { \
......@@ -212,7 +212,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -255,8 +255,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has not lwl and ldl instructions */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -339,7 +339,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
......@@ -365,7 +365,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -406,8 +406,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has no swl and sdl instructions */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -483,7 +482,7 @@ do { \
: "memory"); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* __BIG_ENDIAN */
......@@ -509,7 +508,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -530,8 +529,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has no lwl instruction */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -565,7 +564,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
......@@ -592,7 +591,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -635,8 +634,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has not lwl and ldl instructions */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -718,7 +717,7 @@ do { \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do { \
......@@ -743,7 +742,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -784,8 +783,8 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else
/* MIPSR6 has no swl and sdl instructions */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
......@@ -861,7 +860,7 @@ do { \
: "memory"); \
} while(0)
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif
#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
......
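The unaligned-access hunks above only rename the guard: the lwl/lwr-based variants are now keyed on the positive CONFIG_CPU_HAS_LOAD_STORE_LR option instead of "not CONFIG_CPU_MIPSR6". As a conceptual illustration, the fallback variants assemble the word from individual byte accesses, roughly equivalent to this plain-C sketch of the big-endian case (the helper name is made up):
#include <stdint.h>

/* Conceptual equivalent of the byte-by-byte _LoadW fallback (big endian). */
static uint32_t load_word_unaligned_be(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}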
......@@ -7,7 +7,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strncpy_user.o \
strnlen_user.o uncached.o
obj-y += iomap.o iomap_copy.o
obj-y += iomap_copy.o
obj-$(CONFIG_PCI) += iomap-pci.o
lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
......
......@@ -44,10 +44,3 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
}
#endif /* CONFIG_PCI_DRIVERS_LEGACY */
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
// SPDX-License-Identifier: GPL-2.0
/*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
* (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
* (C) Copyright 2007 MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/export.h>
#include <asm/io.h>
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
* access or an MMIO access; these functions don't care. The info is
* encoded in the hardware mapping set up by the mapping functions
* (or the cookie itself, depending on implementation and hw).
*
* The generic routines don't assume any hardware mappings, and just
* encode the PIO/MMIO as part of the cookie. They coldly assume that
* the MMIO IO mappings are not in the low address range.
*
* Architectures for which this is not true can't use this generic
* implementation and should do their own copy.
*/
#define PIO_MASK 0x0ffffUL
unsigned int ioread8(void __iomem *addr)
{
return readb(addr);
}
EXPORT_SYMBOL(ioread8);
unsigned int ioread16(void __iomem *addr)
{
return readw(addr);
}
EXPORT_SYMBOL(ioread16);
unsigned int ioread16be(void __iomem *addr)
{
return be16_to_cpu(__raw_readw(addr));
}
EXPORT_SYMBOL(ioread16be);
unsigned int ioread32(void __iomem *addr)
{
return readl(addr);
}
EXPORT_SYMBOL(ioread32);
unsigned int ioread32be(void __iomem *addr)
{
return be32_to_cpu(__raw_readl(addr));
}
EXPORT_SYMBOL(ioread32be);
void iowrite8(u8 val, void __iomem *addr)
{
writeb(val, addr);
}
EXPORT_SYMBOL(iowrite8);
void iowrite16(u16 val, void __iomem *addr)
{
writew(val, addr);
}
EXPORT_SYMBOL(iowrite16);
void iowrite16be(u16 val, void __iomem *addr)
{
__raw_writew(cpu_to_be16(val), addr);
}
EXPORT_SYMBOL(iowrite16be);
void iowrite32(u32 val, void __iomem *addr)
{
writel(val, addr);
}
EXPORT_SYMBOL(iowrite32);
void iowrite32be(u32 val, void __iomem *addr)
{
__raw_writel(cpu_to_be32(val), addr);
}
EXPORT_SYMBOL(iowrite32be);
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__mem" accesses, since we want to convert
* to CPU byte order if the host bus happens to not match the
* endianness of PCI/ISA (see mach-generic/mangle-port.h).
*/
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __mem_readb(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __mem_readw(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __mem_readl(addr);
*dst = data;
dst++;
}
}
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
__mem_writeb(*src, addr);
src++;
}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
__mem_writew(*src, addr);
src++;
}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
__mem_writel(*src, addr);
src++;
}
}
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
mmio_insb(addr, dst, count);
}
EXPORT_SYMBOL(ioread8_rep);
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
mmio_insw(addr, dst, count);
}
EXPORT_SYMBOL(ioread16_rep);
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
mmio_insl(addr, dst, count);
}
EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsb(addr, src, count);
}
EXPORT_SYMBOL(iowrite8_rep);
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsw(addr, src, count);
}
EXPORT_SYMBOL(iowrite16_rep);
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsl(addr, src, count);
}
EXPORT_SYMBOL(iowrite32_rep);
/*
* Create a virtual mapping cookie for an IO port range
*
* This uses the same mapping area as the in/out family, which has to be set up
* by the platform initialization code.
*
* Just to make matters somewhat more interesting, on MIPS systems with
* multiple host bridges each bridge has its own ioport address space.
*/
static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr)
{
return (void __iomem *) (mips_io_port_base + port);
}
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
if (port > PIO_MASK)
return NULL;
return ioport_map_legacy(port, nr);
}
EXPORT_SYMBOL(ioport_map);
void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
EXPORT_SYMBOL(ioport_unmap);
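The MIPS-private iomap implementation above is dropped from the build (the library Makefile hunk removes iomap.o, and the pci_iounmap() wrapper goes with it), with the architecture falling back to the kernel's generic iomap code. Driver-side code keeps working unchanged because the same interface is exported; a hedged usage sketch with a made-up device layout:
#include <linux/io.h>

/* Illustrative only: read an ID register and poke a control register
 * through the iomap API; offsets and semantics are invented. */
static u32 example_probe_regs(void __iomem *regs)
{
	u32 id = ioread32(regs + 0x00);

	iowrite32(0x1, regs + 0x04);
	return id;
}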
......@@ -204,9 +204,10 @@
#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
#define _PREF(hint, addr, type) \
#ifdef CONFIG_CPU_HAS_PREFETCH
# define _PREF(hint, addr, type) \
.if \mode == LEGACY_MODE; \
PREF(hint, addr); \
kernel_pref(hint, addr); \
.else; \
.if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
((\to == USEROP) && (type == DST_PREFETCH)); \
......@@ -218,12 +219,15 @@
* used later on. Therefore use $v1. \
*/ \
.set at=v1; \
PREFE(hint, addr); \
user_pref(hint, addr); \
.set noat; \
.else; \
PREF(hint, addr); \
kernel_pref(hint, addr); \
.endif; \
.endif
#else
# define _PREF(hint, addr, type)
#endif
#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
......@@ -297,7 +301,7 @@
and t0, src, ADDRMASK
PREFS( 0, 2*32(src) )
PREFD( 1, 2*32(dst) )
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
bnez t1, .Ldst_unaligned\@
nop
bnez t0, .Lsrc_unaligned_dst_aligned\@
......@@ -385,7 +389,7 @@
bne rem, len, 1b
.set noreorder
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
......@@ -487,7 +491,7 @@
bne len, rem, 1b
.set noreorder
#endif /* !CONFIG_CPU_MIPSR6 */
#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
beqz len, .Ldone\@
nop
......@@ -516,7 +520,7 @@
jr ra
nop
#ifdef CONFIG_CPU_MIPSR6
#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
.Lcopy_unaligned_bytes\@:
1:
COPY_BYTE(0)
......@@ -530,7 +534,7 @@
ADD src, src, 8
b 1b
ADD dst, dst, 8
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
.if __memcpy == 1
END(memcpy)
.set __memcpy, 0
......
......@@ -78,7 +78,6 @@
#endif
.endm
.set noreorder
.align 5
/*
......@@ -94,13 +93,16 @@
.endif
sltiu t0, a2, STORSIZE /* very small region? */
.set noreorder
bnez t0, .Lsmall_memset\@
andi t0, a0, STORMASK /* aligned? */
.set reorder
#ifdef CONFIG_CPU_MICROMIPS
move t8, a1 /* used by 'swp' instruction */
move t9, a1
#endif
.set noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
beqz t0, 1f
PTR_SUBU t0, STORSIZE /* alignment in bytes */
......@@ -111,8 +113,9 @@
PTR_SUBU t0, AT /* alignment in bytes */
.set at
#endif
.set reorder
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
......@@ -122,11 +125,13 @@
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
#else /* CONFIG_CPU_MIPSR6 */
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define STORE_BYTE(N) \
EX(sb, a1, N(a0), .Lbyte_fixup\@); \
.set noreorder; \
beqz t0, 0f; \
PTR_ADDU t0, 1;
PTR_ADDU t0, 1; \
.set reorder;
PTR_ADDU a2, t0 /* correct size */
PTR_ADDU t0, 1
......@@ -145,19 +150,17 @@
ori a0, STORMASK
xori a0, STORMASK
PTR_ADDIU a0, STORSIZE
#endif /* CONFIG_CPU_MIPSR6 */
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
beqz t1, .Lmemset_partial\@ /* no block to fill */
andi t0, a2, 0x40-STORSIZE
beqz t1, .Lmemset_partial\@ /* no block to fill */
PTR_ADDU t1, a0 /* end address */
.set reorder
1: PTR_ADDIU a0, 64
R10KCBARRIER(0(ra))
f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
bne t1, a0, 1b
.set noreorder
.Lmemset_partial\@:
R10KCBARRIER(0(ra))
......@@ -173,20 +176,18 @@
PTR_SUBU t1, AT
.set at
#endif
jr t1
PTR_ADDU a0, t0 /* dest ptr */
jr t1
.set push
.set noreorder
.set nomacro
/* ... but first do longs ... */
f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
2: .set pop
andi a2, STORMASK /* At most one long to go */
2: andi a2, STORMASK /* At most one long to go */
.set noreorder
beqz a2, 1f
#ifndef CONFIG_CPU_MIPSR6
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
PTR_ADDU a0, a2 /* What's left */
.set reorder
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
......@@ -195,6 +196,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
.set reorder
move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
......@@ -210,41 +212,42 @@
#endif
0:
#endif
1: jr ra
move a2, zero
1: move a2, zero
jr ra
.Lsmall_memset\@:
beqz a2, 2f
PTR_ADDU t1, a0, a2
beqz a2, 2f
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
.set noreorder
bne t1, a0, 1b
EX(sb, a1, -1(a0), .Lsmall_fixup\@)
.set reorder
2: jr ra /* done */
move a2, zero
2: move a2, zero
jr ra /* done */
.if __memset == 1
END(memset)
.set __memset, 0
.hidden __memset
.endif
#ifdef CONFIG_CPU_MIPSR6
#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
.Lbyte_fixup\@:
/*
* unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
* a2 = a2 - t0 + 1
*/
PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
jr ra
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
.Lfirst_fixup\@:
/* unset_bytes already in a2 */
jr ra
nop
.Lfwd_fixup\@:
/*
......@@ -255,8 +258,8 @@
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
jr ra
LONG_SUBU a2, t0
jr ra
.Lpartial_fixup\@:
/*
......@@ -267,24 +270,21 @@
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, a0
jr ra
LONG_SUBU a2, t0
jr ra
.Llast_fixup\@:
/* unset_bytes already in a2 */
jr ra
nop
.Lsmall_fixup\@:
/*
* unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1
*/
.set reorder
PTR_SUBU a2, t1, a0
PTR_ADDIU a2, 1
jr ra
.set noreorder
.endm
......@@ -298,8 +298,8 @@
LEAF(memset)
EXPORT_SYMBOL(memset)
beqz a1, 1f
move v0, a0 /* result */
beqz a1, 1f
andi a1, 0xff /* spread fillword */
LONG_SLL t1, a1, 8
......
......@@ -6,7 +6,6 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_CPU_LOONGSON2) += dma.o
#
# Serial port support
......
......@@ -2,4 +2,4 @@
# Makefile for Lemote Fuloong2e mini-PC board.
#
obj-y += irq.o reset.o
obj-y += irq.o reset.o dma.o
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr | 0x80000000;
}
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr & 0x7fffffff;
}
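The new Fuloong2e dma.c above encodes the board's view of memory: device-visible DMA addresses have bit 31 set, and masking it off recovers the CPU physical address. A small worked example with made-up values, written as hosted C so the arithmetic is easy to check:
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t paddr = 0x01200000;            /* CPU physical address (made up) */
	uint32_t dma   = paddr | 0x80000000;    /* what __phys_to_dma() returns   */
	uint32_t back  = dma & 0x7fffffff;      /* what __dma_to_phys() recovers  */

	printf("phys 0x%08x -> dma 0x%08x -> phys 0x%08x\n", paddr, dma, back);
	return 0;
}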
......@@ -2,7 +2,7 @@
# Makefile for lemote loongson2f family machines
#
obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o
obj-y += clock.o machtype.o irq.o reset.o dma.o ec_kb3310b.o
#
# Suspend Support
......
......@@ -8,11 +8,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
if (dma_addr > 0x8fffffff)
return dma_addr;
return dma_addr & 0x0fffffff;
#else
return dma_addr & 0x7fffffff;
#endif
}
......@@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending)
}
}
static struct irqaction cascade_irqaction = {
.handler = no_action,
.flags = IRQF_NO_SUSPEND,
.name = "cascade",
};
static inline void mask_loongson_irq(struct irq_data *d)
{
clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_disable_hazard();
/* Workaround: UART IRQ may deliver to any core */
if (d->irq == LOONGSON_UART_IRQ) {
int cpu = smp_processor_id();
int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
u64 intenclr_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_INTENCLR);
u64 introuter_lpc_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_LPC);
*(volatile u32 *)intenclr_addr = 1 << 10;
*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
}
}
static inline void unmask_loongson_irq(struct irq_data *d)
{
/* Workaround: UART IRQ may deliver to any core */
if (d->irq == LOONGSON_UART_IRQ) {
int cpu = smp_processor_id();
int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
u64 intenset_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_INTENSET);
u64 introuter_lpc_addr = smp_group[node_id] |
(u64)(&LOONGSON_INT_ROUTER_LPC);
*(volatile u32 *)intenset_addr = 1 << 10;
*(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
}
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
irq_enable_hazard();
}
static inline void mask_loongson_irq(struct irq_data *d) { }
static inline void unmask_loongson_irq(struct irq_data *d) { }
/* For MIPS IRQs which shared by all cores */
static struct irq_chip loongson_irq_chip = {
......@@ -183,12 +140,11 @@ void __init mach_init_irq(void)
chip->irq_set_affinity = plat_set_irq_affinity;
irq_set_chip_and_handler(LOONGSON_UART_IRQ,
&loongson_irq_chip, handle_level_irq);
/* setup HT1 irq */
setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
&loongson_irq_chip, handle_percpu_irq);
irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
&loongson_irq_chip, handle_percpu_irq);
set_c0_status(STATUSF_IP2 | STATUSF_IP6);
set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
}
#ifdef CONFIG_HOTPLUG_CPU
......
......@@ -180,43 +180,39 @@ static void __init szmem(unsigned int node)
static void __init node_mem_init(unsigned int node)
{
unsigned long bootmap_size;
unsigned long node_addrspace_offset;
unsigned long start_pfn, end_pfn, freepfn;
unsigned long start_pfn, end_pfn;
node_addrspace_offset = nid_to_addroffset(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
node, node_addrspace_offset);
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
freepfn = start_pfn;
if (node == 0)
freepfn = PFN_UP(__pa_symbol(&_end)); /* kernel end address */
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx, freepfn=0x%lx\n",
node, start_pfn, end_pfn, freepfn);
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
__node_data[node] = prealloc__node_data + node;
NODE_DATA(node)->bdata = &bootmem_node_data[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
bootmap_size = init_bootmem_node(NODE_DATA(node), freepfn,
start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
if (node == 0) /* used by finalize_initrd() */
if (node == 0) {
/* kernel end address */
unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
/* used by finalize_initrd() */
max_low_pfn = end_pfn;
/* This is reserved for the kernel and bdata->node_bootmem_map */
reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT,
((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size,
BOOTMEM_DEFAULT);
/* Reserve the kernel text/data/bss */
memblock_reserve(start_pfn << PAGE_SHIFT,
((kernel_end_pfn - start_pfn) << PAGE_SHIFT));
if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
reserve_bootmem_node(NODE_DATA(node),
(node_addrspace_offset | 0xfe000000),
32 << 20, BOOTMEM_DEFAULT);
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
}
sparse_memory_present_with_active_regions(node);
......
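One detail worth making explicit in the node 0 hunk above: the RS780E GPU window reservation covers the top 32 MiB of the 32-bit space, i.e. 0xfe000000 through 0xffffffff, since 32 << 20 == 0x02000000. A trivial check in hosted C (illustrative only):
#include <stdio.h>

int main(void)
{
	unsigned long base = 0xfe000000UL;
	unsigned long size = 32UL << 20;	/* 0x02000000 */

	printf("reserved: 0x%08lx - 0x%08lx\n", base, base + size - 1);
	return 0;
}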
......@@ -21,6 +21,7 @@
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/clock.h>
......@@ -349,7 +350,7 @@ static void loongson3_smp_finish(void)
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
local_irq_enable();
loongson3_ipi_write64(0,
(void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
pr_info("CPU#%d finished, CP0_ST=%x\n",
smp_processor_id(), read_c0_status());
}
......@@ -416,13 +417,13 @@ static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
cpu, startargs[0], startargs[1], startargs[2]);
loongson3_ipi_write64(startargs[3],
(void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18));
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
loongson3_ipi_write64(startargs[2],
(void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10));
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
loongson3_ipi_write64(startargs[1],
(void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8));
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
loongson3_ipi_write64(startargs[0],
(void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
return 0;
}
......@@ -749,4 +750,7 @@ const struct plat_smp_ops loongson3_smp_ops = {
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
......@@ -32,7 +32,6 @@
#include <linux/kcore.h>
#include <linux/initrd.h>
#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
......@@ -521,17 +520,13 @@ unsigned long pgd_current[NR_CPUS];
#endif
/*
* gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
* are constants. So we use the variants from asm-offset.h until that gcc
* will officially be retired.
*
* Aligning swapper_pg_dir to 64K allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used
* __aligned(64K), its size would get rounded up to the alignment
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
......
......@@ -291,7 +291,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
/* we need a hack to get the PIC's SoC chip id */
ret = of_address_to_resource(node, 0, &res);
if (ret < 0) {
pr_err("PIC %s: reg property not found!\n", node->name);
pr_err("PIC %pOFn: reg property not found!\n", node);
return -EINVAL;
}
......@@ -304,21 +304,21 @@ static int __init xlp_of_pic_init(struct device_node *node,
break;
}
if (socid == NLM_NR_NODES) {
pr_err("PIC %s: Node mapping for bus %d not found!\n",
node->name, bus);
pr_err("PIC %pOFn: Node mapping for bus %d not found!\n",
node, bus);
return -EINVAL;
}
} else {
socid = (res.start >> 18) & 0x3;
if (!nlm_node_present(socid)) {
pr_err("PIC %s: node %d does not exist!\n",
node->name, socid);
pr_err("PIC %pOFn: node %d does not exist!\n",
node, socid);
return -EINVAL;
}
}
if (!nlm_node_present(socid)) {
pr_err("PIC %s: node %d does not exist!\n", node->name, socid);
pr_err("PIC %pOFn: node %d does not exist!\n", node, socid);
return -EINVAL;
}
......@@ -326,7 +326,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
&xlp_pic_irq_domain_ops, NULL);
if (xlp_pic_domain == NULL) {
pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
pr_err("PIC %pOFn: Creating legacy domain failed!\n", node);
return -EINVAL;
}
pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
......
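The pr_err/pr_info changes above (and the similar Ralink ones further down) switch from dereferencing node->name to the %pOFn printk format, which takes the struct device_node pointer itself. A hedged usage sketch; the function and message are invented:
#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative only: %pOFn prints the node's name, %pOF its full path,
 * so no ->name dereference is needed. */
static void report_node(struct device_node *np)
{
	pr_info("probing %pOFn (%pOF)\n", np, np);
}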
......@@ -18,22 +18,36 @@ static int loongson3_pci_config_access(unsigned char access_type,
int where, u32 *data)
{
unsigned char busnum = bus->number;
u_int64_t addr, type;
void *addrp;
int device = PCI_SLOT(devfn);
int function = PCI_FUNC(devfn);
int device = PCI_SLOT(devfn);
int reg = where & ~3;
void *addrp;
u64 addr;
if (where < PCI_CFG_SPACE_SIZE) { /* standard config */
addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
if (busnum == 0) {
if (device > 31)
return PCIBIOS_DEVICE_NOT_FOUND;
addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE) | (addr & 0xffff));
type = 0;
addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE | addr);
} else {
addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE_TP1 | addr);
}
} else if (where < PCI_CFG_SPACE_EXP_SIZE) { /* extended config */
struct pci_dev *rootdev;
rootdev = pci_get_domain_bus_and_slot(0, 0, 0);
if (!rootdev)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = pci_resource_start(rootdev, 3);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
addr |= busnum << 20 | device << 15 | function << 12 | reg;
addrp = (void *)TO_UNCAC(addr);
} else {
addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE_TP1) | (addr));
type = 0x10000;
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (access_type == PCI_ACCESS_WRITE)
......
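The rewritten config accessor above distinguishes standard from extended configuration space: standard config packs bus/device/function/register into bit positions 16/11/8/0, while extended config uses 20/15/12/0 on top of the window taken from BAR 3 of the root device. A small worked example of the two encodings (hosted C, made-up geometry):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int bus = 5, dev = 3, fn = 1, reg = 0x40;

	uint32_t std = (bus << 16) | (dev << 11) | (fn << 8) | reg;
	uint32_t ext = (bus << 20) | (dev << 15) | (fn << 12) | reg;

	printf("standard config offset: 0x%06x\n", std);   /* 0x051940 */
	printf("extended config offset: 0x%06x\n", ext);   /* 0x519040 */
	return 0;
}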
......@@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose)
if (pci_has_flag(PCI_PROBE_ONLY)) {
pci_bus_claim_resources(bus);
} else {
struct pci_bus *child;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
}
pci_bus_add_devices(bus);
}
......
......@@ -246,6 +246,8 @@ static int rt288x_pci_probe(struct platform_device *pdev)
rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
rt2880_pci_controller.of_node = pdev->dev.of_node;
register_pci_controller(&rt2880_pci_controller);
return 0;
}
......
......@@ -133,13 +133,13 @@ static int __init msp_usb_setup(void)
* "D" for device-mode. If it works for Ethernet, why not USB...
* -- hammtrev, 2007/03/22
*/
snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
snprintf(&envstr[0], sizeof(envstr), "usbmode");
/* set default host mode */
val = 1;
/* get environment string */
strp = prom_getenv((char *)&envstr[0]);
strp = prom_getenv(&envstr[0]);
if (strp) {
/* compare string */
if (!strcmp(strp, "device"))
......
......@@ -134,7 +134,7 @@ static int __init ralink_systick_init(struct device_node *np)
systick.dev.min_delta_ticks = 0x3;
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
pr_err("%pOFn: request_irq failed", np);
return -EINVAL;
}
......@@ -146,8 +146,8 @@ static int __init ralink_systick_init(struct device_node *np)
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
pr_info("%pOFn: running - mult: %d, shift: %d\n",
np, systick.dev.mult, systick.dev.shift);
return 0;
}
......
......@@ -62,7 +62,7 @@ static int __init ill_acc_of_setup(void)
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("%s: failed to lookup pdev\n", np->name);
pr_err("%pOFn: failed to lookup pdev\n", np);
return -EINVAL;
}
......
......@@ -49,6 +49,10 @@ static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
static struct rt2880_pmx_func rt3352_cs1_func[] = {
FUNC("spi_cs1", 0, 45, 1),
FUNC("wdg_cs1", 1, 45, 1),
};
static struct rt2880_pmx_group rt3050_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
......@@ -75,6 +79,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = {
GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
{ 0 }
};
......
......@@ -464,7 +464,7 @@ void ip22_be_interrupt(int irq)
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
} else if (debug_be_interrupt)
show_regs((struct pt_regs *)regs);
show_regs(regs);
}
static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
......
......@@ -389,7 +389,6 @@ static void __init node_mem_init(cnodeid_t node)
{
unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
unsigned long slot_freepfn = node_getfirstfree(node);
unsigned long bootmap_size;
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
......@@ -400,7 +399,6 @@ static void __init node_mem_init(cnodeid_t node)
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
memset(__node_data[node], 0, PAGE_SIZE);
NODE_DATA(node)->bdata = &bootmem_node_data[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
......@@ -409,12 +407,11 @@ static void __init node_mem_init(cnodeid_t node)
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
BOOTMEM_DEFAULT);
memblock_reserve(slot_firstpfn << PAGE_SHIFT,
((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
sparse_memory_present_with_active_regions(node);
}
......
# SPDX-License-Identifier: GPL-2.0
hostprogs-y := elf-entry
PHONY += elf-entry
elf-entry: $(obj)/elf-entry
@:
// SPDX-License-Identifier: GPL-2.0
#include <byteswap.h>
#include <elf.h>
#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef be32toh
/* If libc provides [bl]e{32,64}toh() then we'll use them */
#elif BYTE_ORDER == LITTLE_ENDIAN
# define be32toh(x) bswap_32(x)
# define le32toh(x) (x)
# define be64toh(x) bswap_64(x)
# define le64toh(x) (x)
#elif BYTE_ORDER == BIG_ENDIAN
# define be32toh(x) (x)
# define le32toh(x) bswap_32(x)
# define be64toh(x) (x)
# define le64toh(x) bswap_64(x)
#endif
__attribute__((noreturn))
static void die(const char *msg)
{
fputs(msg, stderr);
exit(EXIT_FAILURE);
}
int main(int argc, const char *argv[])
{
uint64_t entry;
size_t nread;
FILE *file;
union {
Elf32_Ehdr ehdr32;
Elf64_Ehdr ehdr64;
} hdr;
if (argc != 2)
die("Usage: elf-entry <elf-file>\n");
file = fopen(argv[1], "r");
if (!file) {
perror("Unable to open input file");
return EXIT_FAILURE;
}
nread = fread(&hdr, 1, sizeof(hdr), file);
if (nread != sizeof(hdr)) {
perror("Unable to read input file");
return EXIT_FAILURE;
}
if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG))
die("Input is not an ELF\n");
switch (hdr.ehdr32.e_ident[EI_CLASS]) {
case ELFCLASS32:
switch (hdr.ehdr32.e_ident[EI_DATA]) {
case ELFDATA2LSB:
entry = le32toh(hdr.ehdr32.e_entry);
break;
case ELFDATA2MSB:
entry = be32toh(hdr.ehdr32.e_entry);
break;
default:
die("Invalid ELF encoding\n");
}
/* Sign extend to form a canonical address */
entry = (int64_t)(int32_t)entry;
break;
case ELFCLASS64:
switch (hdr.ehdr32.e_ident[EI_DATA]) {
case ELFDATA2LSB:
entry = le64toh(hdr.ehdr64.e_entry);
break;
case ELFDATA2MSB:
entry = be64toh(hdr.ehdr64.e_entry);
break;
default:
die("Invalid ELF encoding\n");
}
break;
default:
die("Invalid ELF class\n");
}
printf("0x%016" PRIx64 "\n", entry);
return EXIT_SUCCESS;
}
......@@ -960,12 +960,11 @@ void __init txx9_sramc_init(struct resource *r)
goto exit_put;
err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
if (err) {
device_unregister(&dev->dev);
iounmap(dev->base);
kfree(dev);
device_unregister(&dev->dev);
}
return;
exit_put:
iounmap(dev->base);
put_device(&dev->dev);
return;
}