Commit 241738bd authored by Ralf Baechle

Merge branch 'mips-next' of http://dev.phrozen.org/githttp/mips-next into mips-for-linux-next

parents bdf20507 ce8f0d06
@@ -106,6 +106,7 @@ config ATH79
config BCM47XX
bool "Broadcom BCM47XX based boards"
select ARCH_WANT_OPTIONAL_GPIOLIB
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
@@ -114,7 +115,6 @@ config BCM47XX
select IRQ_CPU
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select GENERIC_GPIO
select SYS_HAS_EARLY_PRINTK
help
Support for BCM47XX based boards
@@ -798,7 +798,7 @@ config NLM_XLR_BOARD
select CSRC_R4K
select IRQ_CPU
select ARCH_SUPPORTS_MSI
select ZONE_DMA if 64BIT
select ZONE_DMA32 if 64BIT
select SYNC_R4K
select SYS_HAS_EARLY_PRINTK
select USB_ARCH_HAS_OHCI if USB_SUPPORT
@@ -826,7 +826,7 @@ config NLM_XLP_BOARD
select CEVT_R4K
select CSRC_R4K
select IRQ_CPU
select ZONE_DMA if 64BIT
select ZONE_DMA32 if 64BIT
select SYNC_R4K
select SYS_HAS_EARLY_PRINTK
select USE_OF
@@ -1507,6 +1507,7 @@ config CPU_XLP
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
select CPU_MIPSR2
help
Netlogic Microsystems XLP processors.
endchoice
@@ -1718,7 +1719,7 @@ config CPU_SUPPORTS_UNCACHED_ACCELERATED
bool
config MIPS_PGD_C0_CONTEXT
bool
default y if 64BIT && CPU_MIPSR2
default y if 64BIT && CPU_MIPSR2 && !CPU_XLP
#
# Set to y for ptrace access to watch registers.
@@ -2149,7 +2150,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON)
depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
default y
help
Enable hardware performance counter support for perf events. If
@@ -9,6 +9,7 @@ config BCM47XX_SSB
select SSB_EMBEDDED
select SSB_B43_PCI_BRIDGE if PCI
select SSB_PCICORE_HOSTMODE if PCI
select SSB_DRIVER_GPIO
default y
help
Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -23,6 +24,7 @@ config BCM47XX_BCMA
select BCMA_DRIVER_MIPS
select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
select BCMA_DRIVER_GPIO
default y
help
Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
@@ -3,5 +3,5 @@
# under Linux.
#
obj-y += gpio.o irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
*/
#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/ssb/ssb_driver_extif.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
#include <asm/mach-bcm47xx/gpio.h>
#if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES)
static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES);
#else
static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES);
#endif
int gpio_request(unsigned gpio, const char *tag)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
return -EINVAL;
if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
return -EINVAL;
if (test_and_set_bit(gpio, gpio_in_use))
return -EBUSY;
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
return -EINVAL;
if (test_and_set_bit(gpio, gpio_in_use))
return -EBUSY;
return 0;
#endif
}
return -EINVAL;
}
EXPORT_SYMBOL(gpio_request);
void gpio_free(unsigned gpio)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco) &&
((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
return;
if (ssb_extif_available(&bcm47xx_bus.ssb.extif) &&
((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
return;
clear_bit(gpio, gpio_in_use);
return;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
if (gpio >= BCM47XX_CHIPCO_GPIO_LINES)
return;
clear_bit(gpio, gpio_in_use);
return;
#endif
}
}
EXPORT_SYMBOL(gpio_free);
int gpio_to_irq(unsigned gpio)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
if (ssb_chipco_available(&bcm47xx_bus.ssb.chipco))
return ssb_mips_irq(bcm47xx_bus.ssb.chipco.dev) + 2;
else if (ssb_extif_available(&bcm47xx_bus.ssb.extif))
return ssb_mips_irq(bcm47xx_bus.ssb.extif.dev) + 2;
else
return -EINVAL;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
return bcma_core_mips_irq(bcm47xx_bus.bcma.bus.drv_cc.core) + 2;
#endif
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);
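The hand-rolled gpio_request()/gpio_free()/gpio_to_irq() implementation above goes away; the Kconfig hunks earlier in this merge select SSB_DRIVER_GPIO and BCMA_DRIVER_GPIO, which register gpiolib gpio_chips for the two bus types, and the Makefile hunk above drops gpio.o. As a rough sketch (the GPIO number and label are made-up examples, not taken from this diff), board code keeps using the same legacy gpiolib calls afterwards:

#include <linux/gpio.h>

/* Illustration only: GPIO 8 and the label are arbitrary. */
static int example_claim_button(void)
{
	int err, irq;

	err = gpio_request_one(8, GPIOF_IN, "example-button");
	if (err)
		return err;

	irq = gpio_to_irq(8);	/* now answered by the ssb/bcma gpio_chip */
	if (irq < 0)
		gpio_free(8);

	return irq;
}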
/*
* Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
* Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -27,6 +28,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/bootinfo.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
@@ -127,6 +129,8 @@ static __init void prom_init_mem(void)
{
unsigned long mem;
unsigned long max;
unsigned long off;
struct cpuinfo_mips *c = &current_cpu_data;
/* Figure out memory size by finding aliases.
*
@@ -143,18 +147,26 @@ static __init void prom_init_mem(void)
* max contains the biggest possible address supported by the platform.
* If the method wants to try something above we assume 128MB ram.
*/
max = ((unsigned long)(prom_init) | ((128 << 20) - 1));
off = (unsigned long)prom_init;
max = off | ((128 << 20) - 1);
for (mem = (1 << 20); mem < (128 << 20); mem += (1 << 20)) {
if (((unsigned long)(prom_init) + mem) > max) {
if ((off + mem) > max) {
mem = (128 << 20);
printk(KERN_DEBUG "assume 128MB RAM\n");
break;
}
if (*(unsigned long *)((unsigned long)(prom_init) + mem) ==
*(unsigned long *)(prom_init))
if (!memcmp(prom_init, prom_init + mem, 32))
break;
}
/* Ignoring the last page when ddr size is 128M. Cached
* accesses to last page is causing the processor to prefetch
* using address above 128M stepping out of the ddr address
* space.
*/
if (c->cputype == CPU_74K && (mem == (128 << 20)))
mem -= 0x1000;
add_memory_region(0, mem, BOOT_MEM_RAM);
}
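To make the probe above concrete (addresses purely illustrative): if prom_init happens to sit at 0x80401000, then off | ((128 << 20) - 1) gives max = 0x87ffffff, so the loop never dereferences anything past the 128 MB window around the kernel. Each iteration compares the first 32 bytes of the kernel image with the bytes 1 MB, 2 MB, ... above it and stops at the first alias, which marks the real RAM size; comparing 32 bytes with memcmp() instead of a single word makes a spurious match far less likely.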
@@ -94,7 +94,7 @@ static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
bcm47xx_fill_sprom(out, prefix);
bcm47xx_fill_sprom(out, prefix, false);
return 0;
} else {
printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n");
@@ -113,7 +113,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
bcm47xx_fill_ssb_boardinfo(&iv->boardinfo, NULL);
memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom(&iv->sprom, NULL);
bcm47xx_fill_sprom(&iv->sprom, NULL, false);
if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
@@ -165,16 +165,17 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
bcm47xx_fill_sprom(out, prefix);
bcm47xx_fill_sprom(out, prefix, false);
return 0;
case BCMA_HOSTTYPE_SOC:
memset(out, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom_ethernet(out, NULL);
core = bcma_find_core(bus, BCMA_CORE_80211);
if (core) {
snprintf(prefix, sizeof(prefix), "sb/%u/",
core->core_index);
bcm47xx_fill_sprom(out, prefix);
bcm47xx_fill_sprom(out, prefix, true);
} else {
bcm47xx_fill_sprom(out, NULL, false);
}
return 0;
default:
@@ -42,25 +42,39 @@ static void create_key(const char *prefix, const char *postfix,
snprintf(buf, len, "%s", name);
}
static int get_nvram_var(const char *prefix, const char *postfix,
const char *name, char *buf, int len, bool fallback)
{
char key[40];
int err;
create_key(prefix, postfix, name, key, sizeof(key));
err = nvram_getenv(key, buf, len);
if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) {
create_key(NULL, postfix, name, key, sizeof(key));
err = nvram_getenv(key, buf, len);
}
return err;
}
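A small usage sketch of the helper above, with a prefix and variable name borrowed from later hunks of this file (the core index is illustrative):

	char buf[10];
	int err;

	/* Tries the nvram key "sb/1/ccode" first; if that lookup fails with
	 * NVRAM_ERR_ENVNOTFOUND, the bare key "ccode" is tried because
	 * fallback is true. */
	err = get_nvram_var("sb/1/", NULL, "ccode", buf, sizeof(buf), true);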
#define NVRAM_READ_VAL(type) \
static void nvram_read_ ## type (const char *prefix, \
const char *postfix, const char *name, \
type *val, type allset) \
type *val, type allset, bool fallback) \
{ \
char buf[100]; \
char key[40]; \
int err; \
type var; \
\
create_key(prefix, postfix, name, key, sizeof(key)); \
\
err = nvram_getenv(key, buf, sizeof(buf)); \
err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
fallback); \
if (err < 0) \
return; \
err = kstrto ## type (buf, 0, &var); \
if (err) { \
pr_warn("can not parse nvram name %s with value %s" \
" got %i", key, buf, err); \
pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
prefix, name, postfix, buf, err); \
return; \
} \
if (allset && var == allset) \
@@ -76,22 +90,19 @@ NVRAM_READ_VAL(u32)
#undef NVRAM_READ_VAL
static void nvram_read_u32_2(const char *prefix, const char *name,
u16 *val_lo, u16 *val_hi)
u16 *val_lo, u16 *val_hi, bool fallback)
{
char buf[100];
char key[40];
int err;
u32 val;
create_key(prefix, NULL, name, key, sizeof(key));
err = nvram_getenv(key, buf, sizeof(buf));
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(buf, 0, &val);
if (err) {
pr_warn("can not parse nvram name %s with value %s got %i",
key, buf, err);
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
return;
}
*val_lo = (val & 0x0000FFFFU);
@@ -99,22 +110,20 @@ static void nvram_read_u32_2(const char *prefix, const char *name,
}
static void nvram_read_leddc(const char *prefix, const char *name,
u8 *leddc_on_time, u8 *leddc_off_time)
u8 *leddc_on_time, u8 *leddc_off_time,
bool fallback)
{
char buf[100];
char key[40];
int err;
u32 val;
create_key(prefix, NULL, name, key, sizeof(key));
err = nvram_getenv(key, buf, sizeof(buf));
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
err = kstrtou32(buf, 0, &val);
if (err) {
pr_warn("can not parse nvram name %s with value %s got %i",
key, buf, err);
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
return;
}
@@ -126,355 +135,435 @@ static void nvram_read_leddc(const char *prefix, const char *name,
}
static void nvram_read_macaddr(const char *prefix, const char *name,
u8 (*val)[6])
u8 (*val)[6], bool fallback)
{
char buf[100];
char key[40];
int err;
create_key(prefix, NULL, name, key, sizeof(key));
err = nvram_getenv(key, buf, sizeof(buf));
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
nvram_parse_macaddr(buf, *val);
}
static void nvram_read_alpha2(const char *prefix, const char *name,
char (*val)[2])
char (*val)[2], bool fallback)
{
char buf[10];
char key[40];
int err;
create_key(prefix, NULL, name, key, sizeof(key));
err = nvram_getenv(key, buf, sizeof(buf));
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
if (buf[0] == '0')
return;
if (strlen(buf) > 2) {
pr_warn("alpha2 is too long %s", buf);
pr_warn("alpha2 is too long %s\n", buf);
return;
}
memcpy(val, buf, sizeof(val));
}
static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom,
const char *prefix)
const char *prefix, bool fallback)
{
nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0);
if (!sprom->board_rev)
nvram_read_u16(NULL, NULL, "boardrev", &sprom->board_rev, 0);
nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0);
nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff);
nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff);
nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff);
nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff);
nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0);
nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0);
nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0);
nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0);
nvram_read_alpha2(prefix, "ccode", &sprom->alpha2);
nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback);
nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback);
nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback);
nvram_read_u8(prefix, NULL, "ledbh3", &sprom->gpio3, 0xff, fallback);
nvram_read_u8(prefix, NULL, "aa2g", &sprom->ant_available_bg, 0,
fallback);
nvram_read_u8(prefix, NULL, "aa5g", &sprom->ant_available_a, 0,
fallback);
nvram_read_s8(prefix, NULL, "ag0", &sprom->antenna_gain.a0, 0,
fallback);
nvram_read_s8(prefix, NULL, "ag1", &sprom->antenna_gain.a1, 0,
fallback);
nvram_read_alpha2(prefix, "ccode", &sprom->alpha2, fallback);
}
static void bcm47xx_fill_sprom_r12389(struct ssb_sprom *sprom,
const char *prefix)
const char *prefix, bool fallback)
{
nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0);
nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0);
nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0);
nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0);
nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0);
nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0);
nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0);
nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0);
nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0);
nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0);
nvram_read_u16(prefix, NULL, "pa0b0", &sprom->pa0b0, 0, fallback);
nvram_read_u16(prefix, NULL, "pa0b1", &sprom->pa0b1, 0, fallback);
nvram_read_u16(prefix, NULL, "pa0b2", &sprom->pa0b2, 0, fallback);
nvram_read_u8(prefix, NULL, "pa0itssit", &sprom->itssi_bg, 0, fallback);
nvram_read_u8(prefix, NULL, "pa0maxpwr", &sprom->maxpwr_bg, 0,
fallback);
nvram_read_u16(prefix, NULL, "pa1b0", &sprom->pa1b0, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1b1", &sprom->pa1b1, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1b2", &sprom->pa1b2, 0, fallback);
nvram_read_u8(prefix, NULL, "pa1itssit", &sprom->itssi_a, 0, fallback);
nvram_read_u8(prefix, NULL, "pa1maxpwr", &sprom->maxpwr_a, 0, fallback);
}
static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r1(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0);
nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0);
nvram_read_u16(prefix, NULL, "boardflags", &sprom->boardflags_lo, 0,
fallback);
nvram_read_u8(prefix, NULL, "cc", &sprom->country_code, 0, fallback);
}
static void bcm47xx_fill_sprom_r2389(struct ssb_sprom *sprom,
const char *prefix)
{
nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0);
nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0);
nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0);
nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0);
nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0);
nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0);
nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0);
nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0);
nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0);
}
static void bcm47xx_fill_sprom_r2(struct ssb_sprom *sprom, const char *prefix)
const char *prefix, bool fallback)
{
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi);
nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
nvram_read_u8(prefix, NULL, "opo", &sprom->opo, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1lob0", &sprom->pa1lob0, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1lob1", &sprom->pa1lob1, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1lob2", &sprom->pa1lob2, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1hib0", &sprom->pa1hib0, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1hib1", &sprom->pa1hib1, 0, fallback);
nvram_read_u16(prefix, NULL, "pa1hib2", &sprom->pa1hib2, 0, fallback);
nvram_read_u8(prefix, NULL, "pa1lomaxpwr", &sprom->maxpwr_al, 0,
fallback);
nvram_read_u8(prefix, NULL, "pa1himaxpwr", &sprom->maxpwr_ah, 0,
fallback);
}
static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r389(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0);
nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0);
nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0);
nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0);
nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0);
nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0);
nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0);
nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0);
nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0);
nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0);
nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0);
nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0);
nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0);
nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0);
nvram_read_u8(prefix, NULL, "bxa2g", &sprom->bxa2g, 0, fallback);
nvram_read_u8(prefix, NULL, "rssisav2g", &sprom->rssisav2g, 0,
fallback);
nvram_read_u8(prefix, NULL, "rssismc2g", &sprom->rssismc2g, 0,
fallback);
nvram_read_u8(prefix, NULL, "rssismf2g", &sprom->rssismf2g, 0,
fallback);
nvram_read_u8(prefix, NULL, "bxa5g", &sprom->bxa5g, 0, fallback);
nvram_read_u8(prefix, NULL, "rssisav5g", &sprom->rssisav5g, 0,
fallback);
nvram_read_u8(prefix, NULL, "rssismc5g", &sprom->rssismc5g, 0,
fallback);
nvram_read_u8(prefix, NULL, "rssismf5g", &sprom->rssismf5g, 0,
fallback);
nvram_read_u8(prefix, NULL, "tri2g", &sprom->tri2g, 0, fallback);
nvram_read_u8(prefix, NULL, "tri5g", &sprom->tri5g, 0, fallback);
nvram_read_u8(prefix, NULL, "tri5gl", &sprom->tri5gl, 0, fallback);
nvram_read_u8(prefix, NULL, "tri5gh", &sprom->tri5gh, 0, fallback);
nvram_read_s8(prefix, NULL, "rxpo2g", &sprom->rxpo2g, 0, fallback);
nvram_read_s8(prefix, NULL, "rxpo5g", &sprom->rxpo5g, 0, fallback);
}
static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r3(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi);
nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
&sprom->leddc_off_time);
&sprom->leddc_off_time, fallback);
}
static void bcm47xx_fill_sprom_r4589(struct ssb_sprom *sprom,
const char *prefix)
const char *prefix, bool fallback)
{
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
&sprom->boardflags2_hi);
nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0);
nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0);
nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0);
nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0);
nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf);
nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf);
nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff);
nvram_read_u8(prefix, NULL, "regrev", &sprom->regrev, 0, fallback);
nvram_read_s8(prefix, NULL, "ag2", &sprom->antenna_gain.a2, 0,
fallback);
nvram_read_s8(prefix, NULL, "ag3", &sprom->antenna_gain.a3, 0,
fallback);
nvram_read_u8(prefix, NULL, "txchain", &sprom->txchain, 0xf, fallback);
nvram_read_u8(prefix, NULL, "rxchain", &sprom->rxchain, 0xf, fallback);
nvram_read_u8(prefix, NULL, "antswitch", &sprom->antswitch, 0xff,
fallback);
nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
&sprom->leddc_off_time);
&sprom->leddc_off_time, fallback);
}
static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r458(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0);
nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0);
nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0);
nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0);
nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0);
nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0);
nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0);
nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0);
nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0);
nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0);
nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0);
nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0);
nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0);
nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0);
nvram_read_u16(prefix, NULL, "cck2gpo", &sprom->cck2gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "ofdm2gpo", &sprom->ofdm2gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "ofdm5gpo", &sprom->ofdm5gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "ofdm5glpo", &sprom->ofdm5glpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "ofdm5ghpo", &sprom->ofdm5ghpo, 0,
fallback);
nvram_read_u16(prefix, NULL, "cddpo", &sprom->cddpo, 0, fallback);
nvram_read_u16(prefix, NULL, "stbcpo", &sprom->stbcpo, 0, fallback);
nvram_read_u16(prefix, NULL, "bw40po", &sprom->bw40po, 0, fallback);
nvram_read_u16(prefix, NULL, "bwduppo", &sprom->bwduppo, 0, fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo0", &sprom->mcs2gpo[0], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo1", &sprom->mcs2gpo[1], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo2", &sprom->mcs2gpo[2], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo3", &sprom->mcs2gpo[3], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo4", &sprom->mcs2gpo[4], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo5", &sprom->mcs2gpo[5], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo6", &sprom->mcs2gpo[6], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs2gpo7", &sprom->mcs2gpo[7], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo0", &sprom->mcs5gpo[0], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo1", &sprom->mcs5gpo[1], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo2", &sprom->mcs5gpo[2], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo3", &sprom->mcs5gpo[3], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo4", &sprom->mcs5gpo[4], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo5", &sprom->mcs5gpo[5], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo6", &sprom->mcs5gpo[6], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5gpo7", &sprom->mcs5gpo[7], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo0", &sprom->mcs5glpo[0], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo1", &sprom->mcs5glpo[1], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo2", &sprom->mcs5glpo[2], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo3", &sprom->mcs5glpo[3], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo4", &sprom->mcs5glpo[4], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo5", &sprom->mcs5glpo[5], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo6", &sprom->mcs5glpo[6], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5glpo7", &sprom->mcs5glpo[7], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo0", &sprom->mcs5ghpo[0], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo1", &sprom->mcs5ghpo[1], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo2", &sprom->mcs5ghpo[2], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo3", &sprom->mcs5ghpo[3], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo4", &sprom->mcs5ghpo[4], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo5", &sprom->mcs5ghpo[5], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo6", &sprom->mcs5ghpo[6], 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs5ghpo7", &sprom->mcs5ghpo[7], 0,
fallback);
}
static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r45(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0);
nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0);
nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0);
nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0);
nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0);
nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0);
nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0);
nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0);
nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0);
nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0);
nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0);
nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0);
nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0);
nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0);
nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0);
nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0);
nvram_read_u8(prefix, NULL, "txpid2ga0", &sprom->txpid2g[0], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid2ga1", &sprom->txpid2g[1], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid2ga2", &sprom->txpid2g[2], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid2ga3", &sprom->txpid2g[3], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5ga0", &sprom->txpid5g[0], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5ga1", &sprom->txpid5g[1], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5ga2", &sprom->txpid5g[2], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5ga3", &sprom->txpid5g[3], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gla0", &sprom->txpid5gl[0], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gla1", &sprom->txpid5gl[1], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gla2", &sprom->txpid5gl[2], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gla3", &sprom->txpid5gl[3], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gha0", &sprom->txpid5gh[0], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gha1", &sprom->txpid5gh[1], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gha2", &sprom->txpid5gh[2], 0,
fallback);
nvram_read_u8(prefix, NULL, "txpid5gha3", &sprom->txpid5gh[3], 0,
fallback);
}
static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r89(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0);
nvram_read_u8(prefix, NULL, "tssipos2g", &sprom->fem.ghz2.tssipos, 0,
fallback);
nvram_read_u8(prefix, NULL, "extpagain2g",
&sprom->fem.ghz2.extpa_gain, 0);
&sprom->fem.ghz2.extpa_gain, 0, fallback);
nvram_read_u8(prefix, NULL, "pdetrange2g",
&sprom->fem.ghz2.pdet_range, 0);
nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0);
nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0);
nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0);
&sprom->fem.ghz2.pdet_range, 0, fallback);
nvram_read_u8(prefix, NULL, "triso2g", &sprom->fem.ghz2.tr_iso, 0,
fallback);
nvram_read_u8(prefix, NULL, "antswctl2g", &sprom->fem.ghz2.antswlut, 0,
fallback);
nvram_read_u8(prefix, NULL, "tssipos5g", &sprom->fem.ghz5.tssipos, 0,
fallback);
nvram_read_u8(prefix, NULL, "extpagain5g",
&sprom->fem.ghz5.extpa_gain, 0);
&sprom->fem.ghz5.extpa_gain, 0, fallback);
nvram_read_u8(prefix, NULL, "pdetrange5g",
&sprom->fem.ghz5.pdet_range, 0);
nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0);
nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0);
nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0);
nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0);
nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0);
nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0);
&sprom->fem.ghz5.pdet_range, 0, fallback);
nvram_read_u8(prefix, NULL, "triso5g", &sprom->fem.ghz5.tr_iso, 0,
fallback);
nvram_read_u8(prefix, NULL, "antswctl5g", &sprom->fem.ghz5.antswlut, 0,
fallback);
nvram_read_u8(prefix, NULL, "tempthresh", &sprom->tempthresh, 0,
fallback);
nvram_read_u8(prefix, NULL, "tempoffset", &sprom->tempoffset, 0,
fallback);
nvram_read_u16(prefix, NULL, "rawtempsense", &sprom->rawtempsense, 0,
fallback);
nvram_read_u8(prefix, NULL, "measpower", &sprom->measpower, 0,
fallback);
nvram_read_u8(prefix, NULL, "tempsense_slope",
&sprom->tempsense_slope, 0);
nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0);
&sprom->tempsense_slope, 0, fallback);
nvram_read_u8(prefix, NULL, "tempcorrx", &sprom->tempcorrx, 0,
fallback);
nvram_read_u8(prefix, NULL, "tempsense_option",
&sprom->tempsense_option, 0);
&sprom->tempsense_option, 0, fallback);
nvram_read_u8(prefix, NULL, "freqoffset_corr",
&sprom->freqoffset_corr, 0);
nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0);
nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0);
nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0);
nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0);
&sprom->freqoffset_corr, 0, fallback);
nvram_read_u8(prefix, NULL, "iqcal_swp_dis", &sprom->iqcal_swp_dis, 0,
fallback);
nvram_read_u8(prefix, NULL, "hw_iqcal_en", &sprom->hw_iqcal_en, 0,
fallback);
nvram_read_u8(prefix, NULL, "elna2g", &sprom->elna2g, 0, fallback);
nvram_read_u8(prefix, NULL, "elna5g", &sprom->elna5g, 0, fallback);
nvram_read_u8(prefix, NULL, "phycal_tempdelta",
&sprom->phycal_tempdelta, 0);
nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0);
&sprom->phycal_tempdelta, 0, fallback);
nvram_read_u8(prefix, NULL, "temps_period", &sprom->temps_period, 0,
fallback);
nvram_read_u8(prefix, NULL, "temps_hysteresis",
&sprom->temps_hysteresis, 0);
nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0);
nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0);
&sprom->temps_hysteresis, 0, fallback);
nvram_read_u8(prefix, NULL, "measpower1", &sprom->measpower1, 0,
fallback);
nvram_read_u8(prefix, NULL, "measpower2", &sprom->measpower2, 0,
fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga0",
&sprom->rxgainerr2ga[0], 0);
&sprom->rxgainerr2ga[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga1",
&sprom->rxgainerr2ga[1], 0);
&sprom->rxgainerr2ga[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr2ga2",
&sprom->rxgainerr2ga[2], 0);
&sprom->rxgainerr2ga[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla0",
&sprom->rxgainerr5gla[0], 0);
&sprom->rxgainerr5gla[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla1",
&sprom->rxgainerr5gla[1], 0);
&sprom->rxgainerr5gla[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gla2",
&sprom->rxgainerr5gla[2], 0);
&sprom->rxgainerr5gla[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma0",
&sprom->rxgainerr5gma[0], 0);
&sprom->rxgainerr5gma[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma1",
&sprom->rxgainerr5gma[1], 0);
&sprom->rxgainerr5gma[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gma2",
&sprom->rxgainerr5gma[2], 0);
&sprom->rxgainerr5gma[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha0",
&sprom->rxgainerr5gha[0], 0);
&sprom->rxgainerr5gha[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha1",
&sprom->rxgainerr5gha[1], 0);
&sprom->rxgainerr5gha[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gha2",
&sprom->rxgainerr5gha[2], 0);
&sprom->rxgainerr5gha[2], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua0",
&sprom->rxgainerr5gua[0], 0);
&sprom->rxgainerr5gua[0], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua1",
&sprom->rxgainerr5gua[1], 0);
&sprom->rxgainerr5gua[1], 0, fallback);
nvram_read_u8(prefix, NULL, "rxgainerr5gua2",
&sprom->rxgainerr5gua[2], 0);
nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0);
nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0);
nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0);
&sprom->rxgainerr5gua[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl2ga0", &sprom->noiselvl2ga[0], 0,
fallback);
nvram_read_u8(prefix, NULL, "noiselvl2ga1", &sprom->noiselvl2ga[1], 0,
fallback);
nvram_read_u8(prefix, NULL, "noiselvl2ga2", &sprom->noiselvl2ga[2], 0,
fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla0",
&sprom->noiselvl5gla[0], 0);
&sprom->noiselvl5gla[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla1",
&sprom->noiselvl5gla[1], 0);
&sprom->noiselvl5gla[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gla2",
&sprom->noiselvl5gla[2], 0);
&sprom->noiselvl5gla[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma0",
&sprom->noiselvl5gma[0], 0);
&sprom->noiselvl5gma[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma1",
&sprom->noiselvl5gma[1], 0);
&sprom->noiselvl5gma[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gma2",
&sprom->noiselvl5gma[2], 0);
&sprom->noiselvl5gma[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha0",
&sprom->noiselvl5gha[0], 0);
&sprom->noiselvl5gha[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha1",
&sprom->noiselvl5gha[1], 0);
&sprom->noiselvl5gha[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gha2",
&sprom->noiselvl5gha[2], 0);
&sprom->noiselvl5gha[2], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua0",
&sprom->noiselvl5gua[0], 0);
&sprom->noiselvl5gua[0], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua1",
&sprom->noiselvl5gua[1], 0);
&sprom->noiselvl5gua[1], 0, fallback);
nvram_read_u8(prefix, NULL, "noiselvl5gua2",
&sprom->noiselvl5gua[2], 0);
&sprom->noiselvl5gua[2], 0, fallback);
nvram_read_u8(prefix, NULL, "pcieingress_war",
&sprom->pcieingress_war, 0);
&sprom->pcieingress_war, 0, fallback);
}
static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_r9(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0);
nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0);
nvram_read_u16(prefix, NULL, "cckbw202gpo", &sprom->cckbw202gpo, 0,
fallback);
nvram_read_u16(prefix, NULL, "cckbw20ul2gpo", &sprom->cckbw20ul2gpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "legofdmbw202gpo",
&sprom->legofdmbw202gpo, 0);
&sprom->legofdmbw202gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul2gpo",
&sprom->legofdmbw20ul2gpo, 0);
&sprom->legofdmbw20ul2gpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205glpo",
&sprom->legofdmbw205glpo, 0);
&sprom->legofdmbw205glpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5glpo",
&sprom->legofdmbw20ul5glpo, 0);
&sprom->legofdmbw20ul5glpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205gmpo",
&sprom->legofdmbw205gmpo, 0);
&sprom->legofdmbw205gmpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5gmpo",
&sprom->legofdmbw20ul5gmpo, 0);
&sprom->legofdmbw20ul5gmpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw205ghpo",
&sprom->legofdmbw205ghpo, 0);
&sprom->legofdmbw205ghpo, 0, fallback);
nvram_read_u32(prefix, NULL, "legofdmbw20ul5ghpo",
&sprom->legofdmbw20ul5ghpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0);
&sprom->legofdmbw20ul5ghpo, 0, fallback);
nvram_read_u32(prefix, NULL, "mcsbw202gpo", &sprom->mcsbw202gpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul2gpo", &sprom->mcsbw20ul2gpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw402gpo", &sprom->mcsbw402gpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw205glpo", &sprom->mcsbw205glpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5glpo",
&sprom->mcsbw20ul5glpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0);
&sprom->mcsbw20ul5glpo, 0, fallback);
nvram_read_u32(prefix, NULL, "mcsbw405glpo", &sprom->mcsbw405glpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw205gmpo", &sprom->mcsbw205gmpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5gmpo",
&sprom->mcsbw20ul5gmpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0);
&sprom->mcsbw20ul5gmpo, 0, fallback);
nvram_read_u32(prefix, NULL, "mcsbw405gmpo", &sprom->mcsbw405gmpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw205ghpo", &sprom->mcsbw205ghpo, 0,
fallback);
nvram_read_u32(prefix, NULL, "mcsbw20ul5ghpo",
&sprom->mcsbw20ul5ghpo, 0);
nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0);
nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0);
&sprom->mcsbw20ul5ghpo, 0, fallback);
nvram_read_u32(prefix, NULL, "mcsbw405ghpo", &sprom->mcsbw405ghpo, 0,
fallback);
nvram_read_u16(prefix, NULL, "mcs32po", &sprom->mcs32po, 0, fallback);
nvram_read_u16(prefix, NULL, "legofdm40duppo",
&sprom->legofdm40duppo, 0);
nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0);
nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0);
&sprom->legofdm40duppo, 0, fallback);
nvram_read_u8(prefix, NULL, "sar2g", &sprom->sar2g, 0, fallback);
nvram_read_u8(prefix, NULL, "sar5g", &sprom->sar5g, 0, fallback);
}
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix)
const char *prefix, bool fallback)
{
char postfix[2];
int i;
@@ -483,46 +572,46 @@ static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u8(prefix, postfix, "maxp2ga",
&pwr_info->maxpwr_2g, 0);
&pwr_info->maxpwr_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt2ga",
&pwr_info->itssi_2g, 0);
&pwr_info->itssi_2g, 0, fallback);
nvram_read_u8(prefix, postfix, "itt5ga",
&pwr_info->itssi_5g, 0);
&pwr_info->itssi_5g, 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw0a",
&pwr_info->pa_2g[0], 0);
&pwr_info->pa_2g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw1a",
&pwr_info->pa_2g[1], 0);
&pwr_info->pa_2g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa2gw2a",
&pwr_info->pa_2g[2], 0);
&pwr_info->pa_2g[2], 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5ga",
&pwr_info->maxpwr_5g, 0);
&pwr_info->maxpwr_5g, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gha",
&pwr_info->maxpwr_5gh, 0);
&pwr_info->maxpwr_5gh, 0, fallback);
nvram_read_u8(prefix, postfix, "maxp5gla",
&pwr_info->maxpwr_5gl, 0);
&pwr_info->maxpwr_5gl, 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw0a",
&pwr_info->pa_5g[0], 0);
&pwr_info->pa_5g[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw1a",
&pwr_info->pa_5g[1], 0);
&pwr_info->pa_5g[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw2a",
&pwr_info->pa_5g[2], 0);
&pwr_info->pa_5g[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw0a",
&pwr_info->pa_5gl[0], 0);
&pwr_info->pa_5gl[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw1a",
&pwr_info->pa_5gl[1], 0);
&pwr_info->pa_5gl[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw2a",
&pwr_info->pa_5gl[2], 0);
&pwr_info->pa_5gl[2], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw0a",
&pwr_info->pa_5gh[0], 0);
&pwr_info->pa_5gh[0], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw1a",
&pwr_info->pa_5gh[1], 0);
&pwr_info->pa_5gh[1], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw2a",
&pwr_info->pa_5gh[2], 0);
&pwr_info->pa_5gh[2], 0, fallback);
}
}
static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
const char *prefix)
const char *prefix, bool fallback)
{
char postfix[2];
int i;
@@ -531,91 +620,112 @@ static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
struct ssb_sprom_core_pwr_info *pwr_info = &sprom->core_pwr_info[i];
snprintf(postfix, sizeof(postfix), "%i", i);
nvram_read_u16(prefix, postfix, "pa2gw3a",
&pwr_info->pa_2g[3], 0);
&pwr_info->pa_2g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5gw3a",
&pwr_info->pa_5g[3], 0);
&pwr_info->pa_5g[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5glw3a",
&pwr_info->pa_5gl[3], 0);
&pwr_info->pa_5gl[3], 0, fallback);
nvram_read_u16(prefix, postfix, "pa5ghw3a",
&pwr_info->pa_5gh[3], 0);
&pwr_info->pa_5gh[3], 0, fallback);
}
}
void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix)
static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
{
nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac);
nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0);
nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0);
nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac);
nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0);
nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0);
nvram_read_macaddr(prefix, "et0macaddr", &sprom->et0mac, fallback);
nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
fallback);
nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
fallback);
nvram_read_macaddr(prefix, "et1macaddr", &sprom->et1mac, fallback);
nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
fallback);
nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
fallback);
nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac, fallback);
nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac, fallback);
}
nvram_read_macaddr(prefix, "macaddr", &sprom->il0mac);
nvram_read_macaddr(prefix, "il0macaddr", &sprom->il0mac);
static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0,
fallback);
nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
fallback);
nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0,
fallback);
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi, fallback);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
&sprom->boardflags2_hi, fallback);
}
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
bcm47xx_fill_sprom_ethernet(sprom, prefix);
bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
bcm47xx_fill_board_data(sprom, prefix, fallback);
nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0);
nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
switch (sprom->revision) {
case 1:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r1(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
break;
case 2:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r2389(sprom, prefix);
bcm47xx_fill_sprom_r2(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
break;
case 3:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r2389(sprom, prefix);
bcm47xx_fill_sprom_r389(sprom, prefix);
bcm47xx_fill_sprom_r3(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r3(sprom, prefix, fallback);
break;
case 4:
case 5:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r4589(sprom, prefix);
bcm47xx_fill_sprom_r458(sprom, prefix);
bcm47xx_fill_sprom_r45(sprom, prefix);
bcm47xx_fill_sprom_path_r4589(sprom, prefix);
bcm47xx_fill_sprom_path_r45(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
bcm47xx_fill_sprom_r45(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
break;
case 8:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r2389(sprom, prefix);
bcm47xx_fill_sprom_r389(sprom, prefix);
bcm47xx_fill_sprom_r4589(sprom, prefix);
bcm47xx_fill_sprom_r458(sprom, prefix);
bcm47xx_fill_sprom_r89(sprom, prefix);
bcm47xx_fill_sprom_path_r4589(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r458(sprom, prefix, fallback);
bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
case 9:
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r2389(sprom, prefix);
bcm47xx_fill_sprom_r389(sprom, prefix);
bcm47xx_fill_sprom_r4589(sprom, prefix);
bcm47xx_fill_sprom_r89(sprom, prefix);
bcm47xx_fill_sprom_r9(sprom, prefix);
bcm47xx_fill_sprom_path_r4589(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r2389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r4589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r89(sprom, prefix, fallback);
bcm47xx_fill_sprom_r9(sprom, prefix, fallback);
bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
break;
default:
pr_warn("Unsupported SPROM revision %d detected. Will extract"
" v1\n", sprom->revision);
sprom->revision = 1;
bcm47xx_fill_sprom_r1234589(sprom, prefix);
bcm47xx_fill_sprom_r12389(sprom, prefix);
bcm47xx_fill_sprom_r1(sprom, prefix);
bcm47xx_fill_sprom_r1234589(sprom, prefix, fallback);
bcm47xx_fill_sprom_r12389(sprom, prefix, fallback);
bcm47xx_fill_sprom_r1(sprom, prefix, fallback);
}
}
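Tying the new argument back to the setup.c hunks earlier in this merge, a short sketch of the two calling conventions (the bus and core numbers are illustrative):

	/* PCI-hosted device: only the per-device prefix is consulted. */
	bcm47xx_fill_sprom(out, "pci/1/1/", false);

	/* SoC wifi core: prefixed keys first, then fall back to bare nvram keys. */
	bcm47xx_fill_sprom(out, "sb/1/", true);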
@@ -623,11 +733,12 @@ void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix)
void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
const char *prefix)
{
nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0,
true);
if (!boardinfo->vendor)
boardinfo->vendor = SSB_BOARDVENDOR_BCM;
nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
}
#endif
@@ -635,10 +746,11 @@ void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
const char *prefix)
{
nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0);
nvram_read_u16(prefix, NULL, "boardvendor", &boardinfo->vendor, 0,
true);
if (!boardinfo->vendor)
boardinfo->vendor = SSB_BOARDVENDOR_BCM;
nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0);
nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
}
#endif
@@ -11,6 +11,7 @@
#include <linux/leds.h>
#include <linux/mtd/physmap.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/gpio.h>
@@ -116,7 +117,8 @@ static irqreturn_t gpio_interrupt(int irq, void *ignored)
/* Interrupt are level triggered, revert the interrupt polarity
to clear the interrupt. */
gpio_polarity(WGT634U_GPIO_RESET, state);
ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << WGT634U_GPIO_RESET,
state ? 1 << WGT634U_GPIO_RESET : 0);
if (!state) {
printk(KERN_INFO "Reset button pressed");
@@ -150,7 +152,9 @@ static int __init wgt634u_init(void)
gpio_interrupt, IRQF_SHARED,
"WGT634U GPIO", &bcm47xx_bus.ssb.chipco)) {
gpio_direction_input(WGT634U_GPIO_RESET);
gpio_intmask(WGT634U_GPIO_RESET, 1);
ssb_gpio_intmask(&bcm47xx_bus.ssb,
1 << WGT634U_GPIO_RESET,
1 << WGT634U_GPIO_RESET);
ssb_chipco_irq_mask(&bcm47xx_bus.ssb.chipco,
SSB_CHIPCO_IRQ_GPIO,
SSB_CHIPCO_IRQ_GPIO);
obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
dev-dsp.o dev-enet.o dev-flash.o dev-pcmcia.o dev-rng.o \
dev-spi.o dev-uart.o dev-wdt.o dev-usb-usbd.o
obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \
setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \
dev-pcmcia.o dev-rng.o dev-spi.o dev-uart.o dev-wdt.o \
dev-usb-usbd.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += boards/
@@ -18,6 +18,7 @@
#include <bcm63xx_dev_uart.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_nvram.h>
#include <bcm63xx_dev_pci.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_dev_dsp.h>
@@ -29,8 +30,6 @@
#define PFX "board_bcm963xx: "
static struct bcm963xx_nvram nvram;
static unsigned int mac_addr_used;
static struct board_info board;
/*
@@ -715,51 +714,15 @@ const char *board_get_name(void)
return board.name;
}
/*
* register & return a new board mac address
*/
static int board_get_mac_address(u8 *mac)
{
u8 *oui;
int count;
if (mac_addr_used >= nvram.mac_addr_count) {
printk(KERN_ERR PFX "not enough mac address\n");
return -ENODEV;
}
memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
oui = mac + ETH_ALEN/2 - 1;
count = mac_addr_used;
while (count--) {
u8 *p = mac + ETH_ALEN - 1;
do {
(*p)++;
if (*p != 0)
break;
p--;
} while (p != oui);
if (p == oui) {
printk(KERN_ERR PFX "unable to fetch mac address\n");
return -ENODEV;
}
}
mac_addr_used++;
return 0;
}
/*
* early init callback, read nvram data from flash and checksum it
*/
void __init board_prom_init(void)
{
unsigned int check_len, i;
u8 *boot_addr, *cfe, *p;
unsigned int i;
u8 *boot_addr, *cfe;
char cfe_version[32];
char *board_name;
u32 val;
/* read base address of boot chip select (0)
@@ -782,27 +745,15 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
/* extract nvram data */
memcpy(&nvram, boot_addr + BCM963XX_NVRAM_OFFSET, sizeof(nvram));
/* check checksum before using data */
if (nvram.version <= 4)
check_len = offsetof(struct bcm963xx_nvram, checksum_old);
else
check_len = sizeof(nvram);
val = 0;
p = (u8 *)&nvram;
while (check_len--)
val += *p;
if (val) {
if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
printk(KERN_ERR PFX "invalid nvram checksum\n");
return;
}
board_name = bcm63xx_nvram_get_name();
/* find board by name */
for (i = 0; i < ARRAY_SIZE(bcm963xx_boards); i++) {
if (strncmp(nvram.name, bcm963xx_boards[i]->name,
sizeof(nvram.name)))
if (strncmp(board_name, bcm963xx_boards[i]->name, 16))
continue;
/* copy, board desc array is marked initdata */
memcpy(&board, bcm963xx_boards[i], sizeof(board));
@@ -812,7 +763,7 @@ void __init board_prom_init(void)
/* bail out if board is not found, will complain later */
if (!board.name[0]) {
char name[17];
memcpy(name, nvram.name, 16);
memcpy(name, board_name, 16);
name[16] = 0;
printk(KERN_ERR PFX "unknown bcm963xx board: %s\n",
name);
@@ -890,11 +841,11 @@ int __init board_register_devices(void)
bcm63xx_pcmcia_register();
if (board.has_enet0 &&
!board_get_mac_address(board.enet0.mac_addr))
!bcm63xx_nvram_get_mac_address(board.enet0.mac_addr))
bcm63xx_enet_register(0, &board.enet0);
if (board.has_enet1 &&
!board_get_mac_address(board.enet1.mac_addr))
!bcm63xx_nvram_get_mac_address(board.enet1.mac_addr))
bcm63xx_enet_register(1, &board.enet1);
if (board.has_usbd)
@@ -907,7 +858,7 @@ int __init board_register_devices(void)
* do this after registering enet devices
*/
#ifdef CONFIG_SSB_PCIHOST
if (!board_get_mac_address(bcm63xx_sprom.il0mac)) {
if (!bcm63xx_nvram_get_mac_address(bcm63xx_sprom.il0mac)) {
memcpy(bcm63xx_sprom.et0mac, bcm63xx_sprom.il0mac, ETH_ALEN);
memcpy(bcm63xx_sprom.et1mac, bcm63xx_sprom.il0mac, ETH_ALEN);
if (ssb_arch_register_fallback_sprom(
@@ -14,6 +14,7 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>
#include <bcm63xx_clk.h>
static DEFINE_MUTEX(clocks_mutex);
@@ -124,15 +125,10 @@ static void enetsw_set(struct clk *clk, int enable)
CKCTL_6368_SWPKT_USB_EN |
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
u32 val;
/* reset switch core afer clock change */
val = bcm_perf_readl(PERF_SOFTRESET_6368_REG);
val &= ~SOFTRESET_6368_ENETSW_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
msleep(10);
val |= SOFTRESET_6368_ENETSW_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);
msleep(10);
}
}
@@ -222,15 +218,10 @@ static void xtm_set(struct clk *clk, int enable)
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
u32 val;
/* reset sar core afer clock change */
val = bcm_perf_readl(PERF_SOFTRESET_6368_REG);
val &= ~SOFTRESET_6368_SAR_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 1);
mdelay(1);
val |= SOFTRESET_6368_SAR_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6368_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_SAR, 0);
mdelay(1);
}
}
@@ -252,6 +243,19 @@ static struct clk clk_ipsec = {
.set = ipsec_set,
};
/*
* PCIe clock
*/
static void pcie_set(struct clk *clk, int enable)
{
bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
}
static struct clk clk_pcie = {
.set = pcie_set,
};
/*
* Internal peripheral clock
*/
@@ -313,6 +317,8 @@ struct clk *clk_get(struct device *dev, const char *id)
return &clk_pcm;
if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
return &clk_ipsec;
if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
return &clk_pcie;
return ERR_PTR(-ENOENT);
}
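A hedged sketch of how a BCM6328 PCIe host driver could pick up the new "pcie" clock through the clk_get() branch added above (the function name and error handling are illustrative, not taken from this diff):

#include <linux/clk.h>
#include <linux/err.h>

static int example_enable_pcie_clock(void)
{
	struct clk *clk;

	clk = clk_get(NULL, "pcie");	/* matches the BCMCPU_IS_6328() branch */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);	/* ends up in pcie_set(), turning on CKCTL_6328_PCIE_EN */
}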
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
* Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
* Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
*/
#define pr_fmt(fmt) "bcm63xx_nvram: " fmt
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <bcm63xx_nvram.h>
/*
* nvram structure
*/
struct bcm963xx_nvram {
u32 version;
u8 reserved1[256];
u8 name[16];
u32 main_tp_number;
u32 psi_size;
u32 mac_addr_count;
u8 mac_addr_base[ETH_ALEN];
u8 reserved2[2];
u32 checksum_old;
u8 reserved3[720];
u32 checksum_high;
};
static struct bcm963xx_nvram nvram;
static int mac_addr_used;
int __init bcm63xx_nvram_init(void *addr)
{
unsigned int check_len;
u32 crc, expected_crc;
/* extract nvram data */
memcpy(&nvram, addr, sizeof(nvram));
/* check checksum before using data */
if (nvram.version <= 4) {
check_len = offsetof(struct bcm963xx_nvram, reserved3);
expected_crc = nvram.checksum_old;
nvram.checksum_old = 0;
} else {
check_len = sizeof(nvram);
expected_crc = nvram.checksum_high;
nvram.checksum_high = 0;
}
crc = crc32_le(~0, (u8 *)&nvram, check_len);
if (crc != expected_crc)
return -EINVAL;
return 0;
}
u8 *bcm63xx_nvram_get_name(void)
{
return nvram.name;
}
EXPORT_SYMBOL(bcm63xx_nvram_get_name);
int bcm63xx_nvram_get_mac_address(u8 *mac)
{
u8 *oui;
int count;
if (mac_addr_used >= nvram.mac_addr_count) {
pr_err("not enough mac addresses\n");
return -ENODEV;
}
memcpy(mac, nvram.mac_addr_base, ETH_ALEN);
oui = mac + ETH_ALEN/2 - 1;
count = mac_addr_used;
while (count--) {
u8 *p = mac + ETH_ALEN - 1;
do {
(*p)++;
if (*p != 0)
break;
p--;
} while (p != oui);
if (p == oui) {
pr_err("unable to fetch mac address\n");
return -ENODEV;
}
}
mac_addr_used++;
return 0;
}
EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
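A short illustration of the allocator above (a sketch; the base address and mac_addr_count are hypothetical). Assuming nvram.mac_addr_base is 00:11:22:33:44:ff and mac_addr_count is at least 2, the first call hands out the base address and the second carries the increment of the last octet into the one before it, never touching the OUI bytes:
	u8 mac[ETH_ALEN];
	bcm63xx_nvram_get_mac_address(mac);	/* 00:11:22:33:44:ff */
	pr_info("enet0 mac: %pM\n", mac);
	bcm63xx_nvram_get_mac_address(mac);	/* 00:11:22:33:45:00 */
	pr_info("enet1 mac: %pM\n", mac);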
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_reset.h>
#define __GEN_RESET_BITS_TABLE(__cpu) \
[BCM63XX_RESET_SPI] = BCM## __cpu ##_RESET_SPI, \
[BCM63XX_RESET_ENET] = BCM## __cpu ##_RESET_ENET, \
[BCM63XX_RESET_USBH] = BCM## __cpu ##_RESET_USBH, \
[BCM63XX_RESET_USBD] = BCM## __cpu ##_RESET_USBD, \
[BCM63XX_RESET_DSL] = BCM## __cpu ##_RESET_DSL, \
[BCM63XX_RESET_SAR] = BCM## __cpu ##_RESET_SAR, \
[BCM63XX_RESET_EPHY] = BCM## __cpu ##_RESET_EPHY, \
[BCM63XX_RESET_ENETSW] = BCM## __cpu ##_RESET_ENETSW, \
[BCM63XX_RESET_PCM] = BCM## __cpu ##_RESET_PCM, \
[BCM63XX_RESET_MPI] = BCM## __cpu ##_RESET_MPI, \
[BCM63XX_RESET_PCIE] = BCM## __cpu ##_RESET_PCIE, \
[BCM63XX_RESET_PCIE_EXT] = BCM## __cpu ##_RESET_PCIE_EXT,
#define BCM6328_RESET_SPI SOFTRESET_6328_SPI_MASK
#define BCM6328_RESET_ENET 0
#define BCM6328_RESET_USBH SOFTRESET_6328_USBH_MASK
#define BCM6328_RESET_USBD SOFTRESET_6328_USBS_MASK
#define BCM6328_RESET_DSL 0
#define BCM6328_RESET_SAR SOFTRESET_6328_SAR_MASK
#define BCM6328_RESET_EPHY SOFTRESET_6328_EPHY_MASK
#define BCM6328_RESET_ENETSW SOFTRESET_6328_ENETSW_MASK
#define BCM6328_RESET_PCM SOFTRESET_6328_PCM_MASK
#define BCM6328_RESET_MPI 0
#define BCM6328_RESET_PCIE \
(SOFTRESET_6328_PCIE_MASK | \
SOFTRESET_6328_PCIE_CORE_MASK | \
SOFTRESET_6328_PCIE_HARD_MASK)
#define BCM6328_RESET_PCIE_EXT SOFTRESET_6328_PCIE_EXT_MASK
#define BCM6338_RESET_SPI SOFTRESET_6338_SPI_MASK
#define BCM6338_RESET_ENET SOFTRESET_6338_ENET_MASK
#define BCM6338_RESET_USBH SOFTRESET_6338_USBH_MASK
#define BCM6338_RESET_USBD SOFTRESET_6338_USBS_MASK
#define BCM6338_RESET_DSL SOFTRESET_6338_ADSL_MASK
#define BCM6338_RESET_SAR SOFTRESET_6338_SAR_MASK
#define BCM6338_RESET_EPHY 0
#define BCM6338_RESET_ENETSW 0
#define BCM6338_RESET_PCM 0
#define BCM6338_RESET_MPI 0
#define BCM6338_RESET_PCIE 0
#define BCM6338_RESET_PCIE_EXT 0
#define BCM6348_RESET_SPI SOFTRESET_6348_SPI_MASK
#define BCM6348_RESET_ENET SOFTRESET_6348_ENET_MASK
#define BCM6348_RESET_USBH SOFTRESET_6348_USBH_MASK
#define BCM6348_RESET_USBD SOFTRESET_6348_USBS_MASK
#define BCM6348_RESET_DSL SOFTRESET_6348_ADSL_MASK
#define BCM6348_RESET_SAR SOFTRESET_6348_SAR_MASK
#define BCM6348_RESET_EPHY 0
#define BCM6348_RESET_ENETSW 0
#define BCM6348_RESET_PCM 0
#define BCM6348_RESET_MPI 0
#define BCM6348_RESET_PCIE 0
#define BCM6348_RESET_PCIE_EXT 0
#define BCM6358_RESET_SPI SOFTRESET_6358_SPI_MASK
#define BCM6358_RESET_ENET SOFTRESET_6358_ENET_MASK
#define BCM6358_RESET_USBH SOFTRESET_6358_USBH_MASK
#define BCM6358_RESET_USBD 0
#define BCM6358_RESET_DSL SOFTRESET_6358_ADSL_MASK
#define BCM6358_RESET_SAR SOFTRESET_6358_SAR_MASK
#define BCM6358_RESET_EPHY SOFTRESET_6358_EPHY_MASK
#define BCM6358_RESET_ENETSW 0
#define BCM6358_RESET_PCM SOFTRESET_6358_PCM_MASK
#define BCM6358_RESET_MPI SOFTRESET_6358_MPI_MASK
#define BCM6358_RESET_PCIE 0
#define BCM6358_RESET_PCIE_EXT 0
#define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK
#define BCM6368_RESET_ENET 0
#define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK
#define BCM6368_RESET_USBD SOFTRESET_6368_USBS_MASK
#define BCM6368_RESET_DSL 0
#define BCM6368_RESET_SAR SOFTRESET_6368_SAR_MASK
#define BCM6368_RESET_EPHY SOFTRESET_6368_EPHY_MASK
#define BCM6368_RESET_ENETSW 0
#define BCM6368_RESET_PCM SOFTRESET_6368_PCM_MASK
#define BCM6368_RESET_MPI SOFTRESET_6368_MPI_MASK
#define BCM6368_RESET_PCIE 0
#define BCM6368_RESET_PCIE_EXT 0
#ifdef BCMCPU_RUNTIME_DETECT
/*
* core reset bits
*/
static const u32 bcm6328_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6328)
};
static const u32 bcm6338_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6338)
};
static const u32 bcm6348_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6348)
};
static const u32 bcm6358_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6358)
};
static const u32 bcm6368_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6368)
};
const u32 *bcm63xx_reset_bits;
static int reset_reg;
static int __init bcm63xx_reset_bits_init(void)
{
if (BCMCPU_IS_6328()) {
reset_reg = PERF_SOFTRESET_6328_REG;
bcm63xx_reset_bits = bcm6328_reset_bits;
} else if (BCMCPU_IS_6338()) {
reset_reg = PERF_SOFTRESET_REG;
bcm63xx_reset_bits = bcm6338_reset_bits;
} else if (BCMCPU_IS_6348()) {
reset_reg = PERF_SOFTRESET_REG;
bcm63xx_reset_bits = bcm6348_reset_bits;
} else if (BCMCPU_IS_6358()) {
reset_reg = PERF_SOFTRESET_6358_REG;
bcm63xx_reset_bits = bcm6358_reset_bits;
} else if (BCMCPU_IS_6368()) {
reset_reg = PERF_SOFTRESET_6368_REG;
bcm63xx_reset_bits = bcm6368_reset_bits;
}
return 0;
}
#else
#ifdef CONFIG_BCM63XX_CPU_6328
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6328)
};
#define reset_reg PERF_SOFTRESET_6328_REG
#endif
#ifdef CONFIG_BCM63XX_CPU_6338
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6338)
};
#define reset_reg PERF_SOFTRESET_REG
#endif
#ifdef CONFIG_BCM63XX_CPU_6345
static const u32 bcm63xx_reset_bits[] = { };
#define reset_reg 0
#endif
#ifdef CONFIG_BCM63XX_CPU_6348
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6348)
};
#define reset_reg PERF_SOFTRESET_REG
#endif
#ifdef CONFIG_BCM63XX_CPU_6358
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6358)
};
#define reset_reg PERF_SOFTRESET_6358_REG
#endif
#ifdef CONFIG_BCM63XX_CPU_6368
static const u32 bcm63xx_reset_bits[] = {
__GEN_RESET_BITS_TABLE(6368)
};
#define reset_reg PERF_SOFTRESET_6368_REG
#endif
static int __init bcm63xx_reset_bits_init(void) { return 0; }
#endif
static DEFINE_SPINLOCK(reset_mutex);
static void __bcm63xx_core_set_reset(u32 mask, int enable)
{
unsigned long flags;
u32 val;
if (!mask)
return;
spin_lock_irqsave(&reset_mutex, flags);
val = bcm_perf_readl(reset_reg);
if (enable)
val &= ~mask;
else
val |= mask;
bcm_perf_writel(val, reset_reg);
spin_unlock_irqrestore(&reset_mutex, flags);
}
void bcm63xx_core_set_reset(enum bcm63xx_core_reset core, int reset)
{
__bcm63xx_core_set_reset(bcm63xx_reset_bits[core], reset);
}
EXPORT_SYMBOL(bcm63xx_core_set_reset);
postcore_initcall(bcm63xx_reset_bits_init);
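For reference, a caller is expected to pulse a core reset with the new helper in the same assert/delay/deassert pattern the clk code above now uses (the delay length is up to the caller):
	/* e.g. reset the ENETSW core after a clock change */
	bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);	/* assert reset */
	msleep(10);
	bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 0);	/* release reset */
	msleep(10);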
......@@ -51,7 +51,8 @@ static int __init flash_init(void)
flash_map.name = "phys_mapped_flash";
flash_map.phys = region_cfg.s.base << 16;
flash_map.size = 0x1fc00000 - flash_map.phys;
flash_map.bankwidth = 1;
/* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
flash_map.bankwidth = region_cfg.s.width + 1;
flash_map.virt = ioremap(flash_map.phys, flash_map.size);
pr_notice("Bootbus flash: Setting flash for %luMB flash at "
"0x%08llx\n", flash_map.size >> 20, flash_map.phys);
......
CONFIG_ATH79=y
CONFIG_ATH79_MACH_AP121=y
CONFIG_ATH79_MACH_AP81=y
CONFIG_ATH79_MACH_DB120=y
CONFIG_ATH79_MACH_PB44=y
CONFIG_ATH79_MACH_UBNT_XM=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_RD_LZMA=y
# CONFIG_KALLSYMS is not set
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_PCI=y
# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_DEBUGFS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_M25P80=y
# CONFIG_M25PXX_USE_FAST_READ is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_PACKET_ENGINE is not set
CONFIG_ATH_COMMON=m
CONFIG_ATH9K=m
CONFIG_ATH9K_AHB=y
CONFIG_INPUT=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO_POLLED=m
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_ATH79=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_PCF857X=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_ATH79_WDT=y
# CONFIG_VGA_ARB is not set
# CONFIG_HID is not set
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_ITU_T=m
......@@ -44,8 +44,8 @@ union bcm47xx_bus {
extern union bcm47xx_bus bcm47xx_bus;
extern enum bcm47xx_bus_type bcm47xx_bus_type;
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
bool fallback);
#ifdef CONFIG_BCM47XX_SSB
void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
*/
#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
#ifndef __BCM47XX_GPIO_H
#define __BCM47XX_GPIO_H
#include <asm-generic/gpio.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/bcma/bcma.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
#define BCM47XX_EXTIF_GPIO_LINES 5
#define BCM47XX_CHIPCO_GPIO_LINES 16
#define gpio_cansleep __gpio_cansleep
#define gpio_to_irq __gpio_to_irq
extern int gpio_request(unsigned gpio, const char *label);
extern void gpio_free(unsigned gpio);
extern int gpio_to_irq(unsigned gpio);
static inline int gpio_get_value(unsigned gpio)
static inline int irq_to_gpio(unsigned int irq)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
return ssb_gpio_in(&bcm47xx_bus.ssb, 1 << gpio);
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
return bcma_chipco_gpio_in(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio);
#endif
}
return -EINVAL;
}
#define gpio_get_value_cansleep gpio_get_value
static inline void gpio_set_value(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
value ? 1 << gpio : 0);
return;
#endif
}
}
#define gpio_set_value_cansleep gpio_set_value
static inline int gpio_cansleep(unsigned gpio)
{
return 0;
}
static inline int gpio_is_valid(unsigned gpio)
{
return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
}
static inline int gpio_direction_input(unsigned gpio)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
0);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
/* first set the gpio out value */
ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
/* then set the gpio mode */
ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 1 << gpio);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
/* first set the gpio out value */
bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
value ? 1 << gpio : 0);
/* then set the gpio mode */
bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
1 << gpio);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_intmask(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_intmask(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_intmask(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio, value ? 1 << gpio : 0);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_polarity(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_polarity(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio, value ? 1 << gpio : 0);
return 0;
#endif
}
return -EINVAL;
}
#endif /* __BCM47XX_GPIO_H */
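A usage sketch for the declarations above (the GPIO number and label are made up; the calls are the generic kernel GPIO API, which both the old inline helpers and the new gpiolib-backed macros satisfy):
	/* Hypothetical LED on ChipCommon GPIO 3. */
	if (gpio_request(3, "power-led"))
		return -EBUSY;
	gpio_direction_output(3, 1);	/* value is set before the pin is switched to output */
	gpio_set_value(3, 0);
	gpio_free(3);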
#ifndef BCM63XX_NVRAM_H
#define BCM63XX_NVRAM_H
#include <linux/types.h>
/**
* bcm63xx_nvram_init() - initializes nvram
* @nvram: address of the nvram data
*
 * Initializes the local nvram copy from the target address and checks
* its checksum.
*
* Returns 0 on success.
*/
int __init bcm63xx_nvram_init(void *nvram);
/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
*
* Returns the board name field from nvram. Note that it might not be
* null terminated if it is exactly 16 bytes long.
*/
u8 *bcm63xx_nvram_get_name(void);
/**
* bcm63xx_nvram_get_mac_address() - register & return a new mac address
* @mac: pointer to array for allocated mac
*
* Registers and returns a mac address from the allocated macs from nvram.
*
* Returns 0 on success.
*/
int bcm63xx_nvram_get_mac_address(u8 *mac);
#endif /* BCM63XX_NVRAM_H */
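Putting the three calls together, board setup code is expected to use the API roughly as follows (a sketch; boot_addr is whatever the board code derived from the CFE image, as in the hunk near the top of this series):
	u8 mac[ETH_ALEN];
	if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET))
		panic("invalid nvram checksum");
	/* %.16s copes with a name that fills all 16 bytes */
	pr_info("board: %.16s\n", bcm63xx_nvram_get_name());
	if (!bcm63xx_nvram_get_mac_address(mac))
		pr_info("first mac: %pM\n", mac);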
......@@ -53,13 +53,18 @@
CKCTL_6338_SAR_EN | \
CKCTL_6338_SPI_EN)
#define CKCTL_6345_CPU_EN (1 << 0)
#define CKCTL_6345_BUS_EN (1 << 1)
#define CKCTL_6345_EBI_EN (1 << 2)
#define CKCTL_6345_UART_EN (1 << 3)
#define CKCTL_6345_ADSLPHY_EN (1 << 4)
#define CKCTL_6345_ENET_EN (1 << 7)
#define CKCTL_6345_USBH_EN (1 << 8)
/* BCM6345 clock bits are shifted left by 16 because a 16-bit test
 * control register precedes them. This way no BCM6345-specific clock
 * handling code is needed, and the 0 that ends up being written to
 * the test control register is harmless.
*/
#define CKCTL_6345_CPU_EN (1 << 16)
#define CKCTL_6345_BUS_EN (1 << 17)
#define CKCTL_6345_EBI_EN (1 << 18)
#define CKCTL_6345_UART_EN (1 << 19)
#define CKCTL_6345_ADSLPHY_EN (1 << 20)
#define CKCTL_6345_ENET_EN (1 << 23)
#define CKCTL_6345_USBH_EN (1 << 24)
#define CKCTL_6345_ALL_SAFE_EN (CKCTL_6345_ENET_EN | \
CKCTL_6345_USBH_EN | \
......@@ -191,6 +196,7 @@
/* Soft Reset register */
#define PERF_SOFTRESET_REG 0x28
#define PERF_SOFTRESET_6328_REG 0x10
#define PERF_SOFTRESET_6358_REG 0x34
#define PERF_SOFTRESET_6368_REG 0x10
#define SOFTRESET_6328_SPI_MASK (1 << 0)
......@@ -244,6 +250,15 @@
SOFTRESET_6348_ACLC_MASK | \
SOFTRESET_6348_ADSLMIPSPLL_MASK)
#define SOFTRESET_6358_SPI_MASK (1 << 0)
#define SOFTRESET_6358_ENET_MASK (1 << 2)
#define SOFTRESET_6358_MPI_MASK (1 << 3)
#define SOFTRESET_6358_EPHY_MASK (1 << 6)
#define SOFTRESET_6358_SAR_MASK (1 << 7)
#define SOFTRESET_6358_USBH_MASK (1 << 12)
#define SOFTRESET_6358_PCM_MASK (1 << 13)
#define SOFTRESET_6358_ADSL_MASK (1 << 14)
#define SOFTRESET_6368_SPI_MASK (1 << 0)
#define SOFTRESET_6368_MPI_MASK (1 << 3)
#define SOFTRESET_6368_EPHY_MASK (1 << 6)
......
#ifndef __BCM63XX_RESET_H
#define __BCM63XX_RESET_H
enum bcm63xx_core_reset {
BCM63XX_RESET_SPI,
BCM63XX_RESET_ENET,
BCM63XX_RESET_USBH,
BCM63XX_RESET_USBD,
BCM63XX_RESET_SAR,
BCM63XX_RESET_DSL,
BCM63XX_RESET_EPHY,
BCM63XX_RESET_ENETSW,
BCM63XX_RESET_PCM,
BCM63XX_RESET_MPI,
BCM63XX_RESET_PCIE,
BCM63XX_RESET_PCIE_EXT,
};
void bcm63xx_core_set_reset(enum bcm63xx_core_reset, int reset);
#endif
......@@ -14,23 +14,6 @@
#define BCM963XX_CFE_VERSION_OFFSET 0x570
#define BCM963XX_NVRAM_OFFSET 0x580
/*
* nvram structure
*/
struct bcm963xx_nvram {
u32 version;
u8 reserved1[256];
u8 name[16];
u32 main_tp_number;
u32 psi_size;
u32 mac_addr_count;
u8 mac_addr_base[6];
u8 reserved2[2];
u32 checksum_old;
u8 reserved3[720];
u32 checksum_high;
};
/*
* board definition
*/
......
......@@ -82,6 +82,9 @@ extern __iomem void *ltq_cgu_membase;
#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
/* allow booting xrx200 phys */
int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr);
/* request a non-gpio and set the PIO config */
#define PMU_PPE BIT(13)
extern void ltq_pmu_enable(unsigned int module);
......
......@@ -18,6 +18,7 @@ extern struct platform_device ls1x_eth0_device;
extern struct platform_device ls1x_ehci_device;
extern struct platform_device ls1x_rtc_device;
void ls1x_serial_setup(void);
extern void __init ls1x_clk_init(void);
extern void __init ls1x_serial_setup(struct platform_device *pdev);
#endif /* __ASM_MACH_LOONGSON1_PLATFORM_H */
......@@ -20,14 +20,15 @@
/* Clock PLL Divisor Register Bits */
#define DIV_DC_EN (0x1 << 31)
#define DIV_DC (0x1f << 26)
#define DIV_CPU_EN (0x1 << 25)
#define DIV_CPU (0x1f << 20)
#define DIV_DDR_EN (0x1 << 19)
#define DIV_DDR (0x1f << 14)
#define DIV_DC_SHIFT 26
#define DIV_CPU_SHIFT 20
#define DIV_DDR_SHIFT 14
#define DIV_DC_WIDTH 5
#define DIV_CPU_WIDTH 5
#define DIV_DDR_WIDTH 5
#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
......@@ -8,7 +8,9 @@
#ifndef __ASM_NETLOGIC_IRQ_H
#define __ASM_NETLOGIC_IRQ_H
#define NR_IRQS 64
#include <asm/mach-netlogic/multi-node.h>
#define NR_IRQS (64 * NLM_NR_NODES)
#define MIPS_CPU_IRQ_BASE 0
#endif /* __ASM_NETLOGIC_IRQ_H */
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NETLOGIC_MULTI_NODE_H_
#define _NETLOGIC_MULTI_NODE_H_
#ifndef CONFIG_NLM_MULTINODE
#define NLM_NR_NODES 1
#else
#if defined(CONFIG_NLM_MULTINODE_2)
#define NLM_NR_NODES 2
#elif defined(CONFIG_NLM_MULTINODE_4)
#define NLM_NR_NODES 4
#else
#define NLM_NR_NODES 1
#endif
#endif
#define NLM_CORES_PER_NODE 8
#define NLM_THREADS_PER_CORE 4
#define NLM_CPUS_PER_NODE (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
#endif
......@@ -45,15 +45,19 @@
#define BOOT_NMI_HANDLER 8
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/mach-netlogic/multi-node.h>
struct irq_desc;
extern struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
void nlm_smp_irq_init(void);
void nlm_smp_irq_init(int hwcpuid);
void nlm_boot_secondary_cpus(void);
int nlm_wakeup_secondary_cpus(u32 wakeup_mask);
int nlm_wakeup_secondary_cpus(void);
void nlm_rmiboot_preboot(void);
void nlm_percpu_init(int hwcpuid);
static inline void
nlm_set_nmi_handler(void *handler)
......@@ -68,9 +72,42 @@ nlm_set_nmi_handler(void *handler)
* Misc.
*/
unsigned int nlm_get_cpu_frequency(void);
void nlm_node_init(int node);
extern struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
struct nlm_soc_info {
unsigned long coremask; /* cores enabled on the soc */
unsigned long ebase;
uint64_t irqmask;
uint64_t sysbase; /* only for XLP */
uint64_t picbase;
spinlock_t piclock;
};
#define nlm_get_node(i) (&nlm_nodes[i])
#ifdef CONFIG_CPU_XLR
#define nlm_current_node() (&nlm_nodes[0])
#else
#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
#endif
struct irq_data;
uint64_t nlm_pci_irqmask(int node);
void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *));
/*
 * NR_IRQS is divided between the nodes; each node has a separate IRQ space.
*/
static inline int nlm_irq_to_xirq(int node, int irq)
{
return node * NR_IRQS / NLM_NR_NODES + irq;
}
extern unsigned long nlm_common_ebase;
extern int nlm_threads_per_core;
extern uint32_t nlm_cpumask, nlm_coremask;
extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
extern int nlm_cpu_ready[];
#endif
#endif /* _NETLOGIC_COMMON_H_ */
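To make the per-node IRQ split concrete (illustrative numbers only, assuming a quad-node build where NLM_NR_NODES is 4 and NR_IRQS is therefore 256, i.e. 64 IRQs per node):
	nlm_irq_to_xirq(0, IRQ_TIMER);	/* 0 * 64 + 7  ==   7 */
	nlm_irq_to_xirq(1, IRQ_TIMER);	/* 1 * 64 + 7  ==  71 */
	nlm_irq_to_xirq(3, 20);		/* 3 * 64 + 20 == 212 */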
......@@ -39,7 +39,7 @@
#define IRQ_IPI_SMP_FUNCTION 3
#define IRQ_IPI_SMP_RESCHEDULE 4
#define IRQ_MSGRING 6
#define IRQ_FMN 5
#define IRQ_TIMER 7
#endif
......@@ -73,4 +73,146 @@ static inline int hard_smp_processor_id(void)
return __read_32bit_c0_register($15, 1) & 0x3ff;
}
static inline int nlm_nodeid(void)
{
return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
}
static inline unsigned int nlm_core_id(void)
{
return (read_c0_ebase() & 0x1c) >> 2;
}
static inline unsigned int nlm_thread_id(void)
{
return read_c0_ebase() & 0x3;
}
#define __read_64bit_c2_split(source, sel) \
({ \
unsigned long long __val; \
unsigned long __flags; \
\
local_irq_save(__flags); \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%M0, " #source "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0\n\t" \
: "=r" (__val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%M0, " #source ", " #sel "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0\n\t" \
: "=r" (__val)); \
local_irq_restore(__flags); \
\
__val; \
})
#define __write_64bit_c2_split(source, sel, val) \
do { \
unsigned long __flags; \
\
local_irq_save(__flags); \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc2\t%L0, " #source "\n\t" \
".set\tmips0\n\t" \
: : "r" (val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc2\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "r" (val)); \
local_irq_restore(__flags); \
} while (0)
#define __read_32bit_c2_register(source, sel) \
({ uint32_t __res; \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mfc2\t%0, " #source "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
else \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mfc2\t%0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
__res; \
})
#define __read_64bit_c2_register(source, sel) \
({ unsigned long long __res; \
if (sizeof(unsigned long) == 4) \
__res = __read_64bit_c2_split(source, sel); \
else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%0, " #source "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
__res; \
})
#define __write_64bit_c2_register(register, sel, value) \
do { \
if (sizeof(unsigned long) == 4) \
__write_64bit_c2_split(register, sel, value); \
else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmtc2\t%z0, " #register "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmtc2\t%z0, " #register ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
} while (0)
#define __write_32bit_c2_register(reg, sel, value) \
({ \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
})
#endif /*_ASM_NLM_MIPS_EXTS_H */
......@@ -273,36 +273,16 @@ nlm_pic_read_irt(uint64_t base, int irt_index)
return nlm_read_pic_reg(base, PIC_IRT(irt_index));
}
static inline uint64_t
nlm_pic_read_control(uint64_t base)
{
return nlm_read_pic_reg(base, PIC_CTRL);
}
static inline void
nlm_pic_write_control(uint64_t base, uint64_t control)
{
nlm_write_pic_reg(base, PIC_CTRL, control);
}
static inline void
nlm_pic_update_control(uint64_t base, uint64_t control)
{
uint64_t val;
val = nlm_read_pic_reg(base, PIC_CTRL);
nlm_write_pic_reg(base, PIC_CTRL, control | val);
}
static inline void
nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu)
{
uint64_t val;
val = nlm_read_pic_reg(base, PIC_IRT(irt));
val |= cpu & 0xf;
if (cpu > 15)
val |= 1 << 16;
/* clear cpuset and mask */
val &= ~((0x7ull << 16) | 0xffff);
/* set DB, cpuset and cpumask */
val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf));
nlm_write_pic_reg(base, PIC_IRT(irt), val);
}
......@@ -369,7 +349,7 @@ nlm_pic_enable_irt(uint64_t base, int irt)
static inline void
nlm_pic_disable_irt(uint64_t base, int irt)
{
uint32_t reg;
uint64_t reg;
reg = nlm_read_pic_reg(base, PIC_IRT(irt));
nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31));
......@@ -379,15 +359,9 @@ static inline void
nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)
{
uint64_t ipi;
int node, ncpu;
node = hwt / 32;
ncpu = hwt & 0x1f;
ipi = ((uint64_t)nmi << 31) | (irq << 20) | (node << 17) |
(1 << (ncpu & 0xf));
if (ncpu > 15)
ipi |= 0x10000; /* Setting bit 16 to select cpus 16-31 */
ipi = (nmi << 31) | (irq << 20);
ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */
nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
}
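A worked instance of the new encoding (illustrative values only, not code from the tree): for hwt = 21 and irq = 41 with no NMI, the cpuset field is 21 >> 4 = 1 and the mask bit is 1 << (21 & 0xf) = 1 << 5, so:
	ipi = (0ULL << 31) | (41 << 20)	/* 0x02900000 */
	    | ((21 >> 4) << 16)		/* cpuset 1  -> 0x00010000 */
	    | (1 << (21 & 0xf));	/* cpu bit 5 -> 0x00000020 */
	/* value written to PIC_IPI_CTL: 0x02910020 */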
......@@ -404,12 +378,10 @@ nlm_pic_ack(uint64_t base, int irt_num)
static inline void
nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
{
nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, 0);
nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
}
extern uint64_t nlm_pic_base;
int nlm_irq_to_irt(int irq);
int nlm_irt_to_irq(int irt);
#endif /* __ASSEMBLY__ */
#endif /* _NLM_HAL_PIC_H */
......@@ -124,6 +124,5 @@
#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
extern uint64_t nlm_sys_base;
#endif
#endif
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NLM_FMN_H_
#define _NLM_FMN_H_
#include <asm/netlogic/mips-extns.h> /* for COP2 access */
/* Station IDs */
#define FMN_STNID_CPU0 0x00
#define FMN_STNID_CPU1 0x08
#define FMN_STNID_CPU2 0x10
#define FMN_STNID_CPU3 0x18
#define FMN_STNID_CPU4 0x20
#define FMN_STNID_CPU5 0x28
#define FMN_STNID_CPU6 0x30
#define FMN_STNID_CPU7 0x38
#define FMN_STNID_XGS0_TX 64
#define FMN_STNID_XMAC0_00_TX 64
#define FMN_STNID_XMAC0_01_TX 65
#define FMN_STNID_XMAC0_02_TX 66
#define FMN_STNID_XMAC0_03_TX 67
#define FMN_STNID_XMAC0_04_TX 68
#define FMN_STNID_XMAC0_05_TX 69
#define FMN_STNID_XMAC0_06_TX 70
#define FMN_STNID_XMAC0_07_TX 71
#define FMN_STNID_XMAC0_08_TX 72
#define FMN_STNID_XMAC0_09_TX 73
#define FMN_STNID_XMAC0_10_TX 74
#define FMN_STNID_XMAC0_11_TX 75
#define FMN_STNID_XMAC0_12_TX 76
#define FMN_STNID_XMAC0_13_TX 77
#define FMN_STNID_XMAC0_14_TX 78
#define FMN_STNID_XMAC0_15_TX 79
#define FMN_STNID_XGS1_TX 80
#define FMN_STNID_XMAC1_00_TX 80
#define FMN_STNID_XMAC1_01_TX 81
#define FMN_STNID_XMAC1_02_TX 82
#define FMN_STNID_XMAC1_03_TX 83
#define FMN_STNID_XMAC1_04_TX 84
#define FMN_STNID_XMAC1_05_TX 85
#define FMN_STNID_XMAC1_06_TX 86
#define FMN_STNID_XMAC1_07_TX 87
#define FMN_STNID_XMAC1_08_TX 88
#define FMN_STNID_XMAC1_09_TX 89
#define FMN_STNID_XMAC1_10_TX 90
#define FMN_STNID_XMAC1_11_TX 91
#define FMN_STNID_XMAC1_12_TX 92
#define FMN_STNID_XMAC1_13_TX 93
#define FMN_STNID_XMAC1_14_TX 94
#define FMN_STNID_XMAC1_15_TX 95
#define FMN_STNID_GMAC 96
#define FMN_STNID_GMACJFR_0 96
#define FMN_STNID_GMACRFR_0 97
#define FMN_STNID_GMACTX0 98
#define FMN_STNID_GMACTX1 99
#define FMN_STNID_GMACTX2 100
#define FMN_STNID_GMACTX3 101
#define FMN_STNID_GMACJFR_1 102
#define FMN_STNID_GMACRFR_1 103
#define FMN_STNID_DMA 104
#define FMN_STNID_DMA_0 104
#define FMN_STNID_DMA_1 105
#define FMN_STNID_DMA_2 106
#define FMN_STNID_DMA_3 107
#define FMN_STNID_XGS0FR 112
#define FMN_STNID_XMAC0JFR 112
#define FMN_STNID_XMAC0RFR 113
#define FMN_STNID_XGS1FR 114
#define FMN_STNID_XMAC1JFR 114
#define FMN_STNID_XMAC1RFR 115
#define FMN_STNID_SEC 120
#define FMN_STNID_SEC0 120
#define FMN_STNID_SEC1 121
#define FMN_STNID_SEC2 122
#define FMN_STNID_SEC3 123
#define FMN_STNID_PK0 124
#define FMN_STNID_SEC_RSA 124
#define FMN_STNID_SEC_RSVD0 125
#define FMN_STNID_SEC_RSVD1 126
#define FMN_STNID_SEC_RSVD2 127
#define FMN_STNID_GMAC1 80
#define FMN_STNID_GMAC1_FR_0 81
#define FMN_STNID_GMAC1_TX0 82
#define FMN_STNID_GMAC1_TX1 83
#define FMN_STNID_GMAC1_TX2 84
#define FMN_STNID_GMAC1_TX3 85
#define FMN_STNID_GMAC1_FR_1 87
#define FMN_STNID_GMAC0 96
#define FMN_STNID_GMAC0_FR_0 97
#define FMN_STNID_GMAC0_TX0 98
#define FMN_STNID_GMAC0_TX1 99
#define FMN_STNID_GMAC0_TX2 100
#define FMN_STNID_GMAC0_TX3 101
#define FMN_STNID_GMAC0_FR_1 103
#define FMN_STNID_CMP_0 108
#define FMN_STNID_CMP_1 109
#define FMN_STNID_CMP_2 110
#define FMN_STNID_CMP_3 111
#define FMN_STNID_PCIE_0 116
#define FMN_STNID_PCIE_1 117
#define FMN_STNID_PCIE_2 118
#define FMN_STNID_PCIE_3 119
#define FMN_STNID_XLS_PK0 121
#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
#define nlm_read_c2_cc2(s) __read_32bit_c2_register($18, s)
#define nlm_read_c2_cc3(s) __read_32bit_c2_register($19, s)
#define nlm_read_c2_cc4(s) __read_32bit_c2_register($20, s)
#define nlm_read_c2_cc5(s) __read_32bit_c2_register($21, s)
#define nlm_read_c2_cc6(s) __read_32bit_c2_register($22, s)
#define nlm_read_c2_cc7(s) __read_32bit_c2_register($23, s)
#define nlm_read_c2_cc8(s) __read_32bit_c2_register($24, s)
#define nlm_read_c2_cc9(s) __read_32bit_c2_register($25, s)
#define nlm_read_c2_cc10(s) __read_32bit_c2_register($26, s)
#define nlm_read_c2_cc11(s) __read_32bit_c2_register($27, s)
#define nlm_read_c2_cc12(s) __read_32bit_c2_register($28, s)
#define nlm_read_c2_cc13(s) __read_32bit_c2_register($29, s)
#define nlm_read_c2_cc14(s) __read_32bit_c2_register($30, s)
#define nlm_read_c2_cc15(s) __read_32bit_c2_register($31, s)
#define nlm_write_c2_cc0(s, v) __write_32bit_c2_register($16, s, v)
#define nlm_write_c2_cc1(s, v) __write_32bit_c2_register($17, s, v)
#define nlm_write_c2_cc2(s, v) __write_32bit_c2_register($18, s, v)
#define nlm_write_c2_cc3(s, v) __write_32bit_c2_register($19, s, v)
#define nlm_write_c2_cc4(s, v) __write_32bit_c2_register($20, s, v)
#define nlm_write_c2_cc5(s, v) __write_32bit_c2_register($21, s, v)
#define nlm_write_c2_cc6(s, v) __write_32bit_c2_register($22, s, v)
#define nlm_write_c2_cc7(s, v) __write_32bit_c2_register($23, s, v)
#define nlm_write_c2_cc8(s, v) __write_32bit_c2_register($24, s, v)
#define nlm_write_c2_cc9(s, v) __write_32bit_c2_register($25, s, v)
#define nlm_write_c2_cc10(s, v) __write_32bit_c2_register($26, s, v)
#define nlm_write_c2_cc11(s, v) __write_32bit_c2_register($27, s, v)
#define nlm_write_c2_cc12(s, v) __write_32bit_c2_register($28, s, v)
#define nlm_write_c2_cc13(s, v) __write_32bit_c2_register($29, s, v)
#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
#define FMN_STN_RX_QSIZE 256
#define FMN_NSTATIONS 128
#define FMN_CORE_NBUCKETS 8
static inline void nlm_msgsnd(unsigned int stid)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $1, %0\n"
"c2 0x10001\n" /* msgsnd $1 */
".set pop\n"
: : "r" (stid) : "$1"
);
}
static inline void nlm_msgld(unsigned int pri)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $1, %0\n"
"c2 0x10002\n" /* msgld $1 */
".set pop\n"
: : "r" (pri) : "$1"
);
}
static inline void nlm_msgwait(unsigned int mask)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $8, %0\n"
"c2 0x10003\n" /* msgwait $1 */
".set pop\n"
: : "r" (mask) : "$1"
);
}
/*
* Disable interrupts and enable COP2 access
*/
static inline uint32_t nlm_cop2_enable(void)
{
uint32_t sr = read_c0_status();
write_c0_status((sr & ~ST0_IE) | ST0_CU2);
return sr;
}
static inline void nlm_cop2_restore(uint32_t sr)
{
write_c0_status(sr);
}
static inline void nlm_fmn_setup_intr(int irq, unsigned int tmask)
{
uint32_t config;
config = (1 << 24) /* interrupt water mark - 1 msg */
| (irq << 16) /* irq */
| (tmask << 8) /* thread mask */
| 0x2; /* enable watermark intr, disable empty intr */
nlm_write_c2_config(config);
}
struct nlm_fmn_msg {
uint64_t msg0;
uint64_t msg1;
uint64_t msg2;
uint64_t msg3;
};
static inline int nlm_fmn_send(unsigned int size, unsigned int code,
unsigned int stid, struct nlm_fmn_msg *msg)
{
unsigned int dest;
uint32_t status;
int i;
/*
* Make sure that all the writes pending at the cpu are flushed.
 * Any writes pending on the CPU will not be seen by devices. L1/L2
* caches are coherent with IO, so no cache flush needed.
*/
__asm __volatile("sync");
/* Load TX message buffers */
nlm_write_c2_tx_msg0(msg->msg0);
nlm_write_c2_tx_msg1(msg->msg1);
nlm_write_c2_tx_msg2(msg->msg2);
nlm_write_c2_tx_msg3(msg->msg3);
dest = ((size - 1) << 16) | (code << 8) | stid;
/*
 * Retry a few times on credit failure; this should be a
 * transient condition unless there is a configuration
 * error or the receiver is stuck.
*/
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status(0);
if (status & 0x2)
pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
/* If there is a credit failure, return error */
return status & 0x06;
}
static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid,
struct nlm_fmn_msg *msg)
{
uint32_t status, tmp;
nlm_msgld(bucket);
/* wait for load pending to clear */
do {
status = nlm_read_c2_status(1);
} while ((status & 0x08) != 0);
/* receive error bits */
tmp = status & 0x30;
if (tmp != 0)
return tmp;
*size = ((status & 0xc0) >> 6) + 1;
*code = (status & 0xff00) >> 8;
*stid = (status & 0x7f0000) >> 16;
msg->msg0 = nlm_read_c2_rx_msg0();
msg->msg1 = nlm_read_c2_rx_msg1();
msg->msg2 = nlm_read_c2_rx_msg2();
msg->msg3 = nlm_read_c2_rx_msg3();
return 0;
}
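A sketch of how an XLR driver would push a single message through these helpers (station id, code and payload are hypothetical; COP2 access has to be enabled around the operation, as noted above):
	struct nlm_fmn_msg msg;
	uint32_t sr;
	int err;
	msg.msg0 = 0x1234;			/* payload is device specific */
	msg.msg1 = msg.msg2 = msg.msg3 = 0;
	sr = nlm_cop2_enable();			/* irqs off, CU2 set */
	err = nlm_fmn_send(1, 0, FMN_STNID_GMAC0_TX0, &msg);
	nlm_cop2_restore(sr);
	if (err)
		pr_err("FMN send failed, status %x\n", err);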
struct xlr_fmn_info {
int num_buckets;
int start_stn_id;
int end_stn_id;
int credit_config[128];
};
struct xlr_board_fmn_config {
int bucket_size[128]; /* size of buckets for all stations */
struct xlr_fmn_info cpu[8];
struct xlr_fmn_info gmac[2];
struct xlr_fmn_info dma;
struct xlr_fmn_info cmp;
struct xlr_fmn_info sae;
struct xlr_fmn_info xgmac[2];
};
extern int nlm_register_fmn_handler(int start, int end,
void (*fn)(int, int, int, int, struct nlm_fmn_msg *, void *),
void *arg);
extern void xlr_percpu_fmn_init(void);
extern void nlm_setup_fmn_irq(void);
extern void xlr_board_info_setup(void);
extern struct xlr_board_fmn_config xlr_board_fmn_config;
#endif
......@@ -258,7 +258,5 @@ nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
nlm_write_reg(base, PIC_IRT_1(irt),
(1 << 30) | (1 << 6) | irq);
}
extern uint64_t nlm_pic_base;
#endif
#endif /* _ASM_NLM_XLR_PIC_H */
......@@ -51,10 +51,8 @@ static inline unsigned int nlm_chip_is_xls_b(void)
return ((prid & 0xf000) == 0x4000);
}
/*
* XLR chip types
*/
/* The XLS product line has chip versions 0x[48c]? */
/* XLR chip types */
/* The XLS product line has chip versions 0x[48c]? */
static inline unsigned int nlm_chip_is_xls(void)
{
uint32_t prid = read_c0_prid();
......
......@@ -840,6 +840,16 @@ static const struct mips_perf_event bmips5000_event_map
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};
/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
......@@ -1092,6 +1102,100 @@ static const struct mips_perf_event octeon_cache_map
},
};
static const struct mips_perf_event xlp_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
[C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
[C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
[C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
[C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
[C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(DTLB)] = {
/*
 * Only general DTLB misses are counted, so use the same event for
* read and write.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { 0x25, CNTR_ALL },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
[C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
},
},
};
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
const struct mips_perf_event *pev)
......@@ -1444,6 +1548,20 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
return &raw_event;
}
static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
unsigned int raw_id = config & 0xff;
/* Only 1-63 are defined */
if ((raw_id < 0x01) || (raw_id > 0x3f))
return ERR_PTR(-EOPNOTSUPP);
raw_event.cntr_mask = CNTR_ALL;
raw_event.event_id = raw_id;
return &raw_event;
}
static int __init
init_hw_perf_events(void)
{
......@@ -1522,6 +1640,12 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &bmips5000_event_map;
mipspmu.cache_event_map = &bmips5000_cache_map;
break;
case CPU_XLP:
mipspmu.name = "xlp";
mipspmu.general_event_map = &xlp_event_map;
mipspmu.cache_event_map = &xlp_cache_map;
mipspmu.map_raw_event = xlp_pmu_map_raw_event;
break;
default:
pr_cont("Either hardware does not support performance "
"counters, or not yet implemented.\n");
......
......@@ -36,4 +36,8 @@ config PCI_LANTIQ
bool "PCI Support"
depends on SOC_XWAY && PCI
config XRX200_PHY_FW
bool "XRX200 PHY firmware loader"
depends on SOC_XWAY
endif
......@@ -87,9 +87,6 @@ void __init device_tree_init(void)
reserve_bootmem(base, size, BOOTMEM_DEFAULT);
unflatten_device_tree();
/* free the space reserved for the dt blob */
free_bootmem(base, size);
}
void __init prom_init(void)
......@@ -119,7 +116,7 @@ int __init plat_of_setup(void)
sizeof(of_ids[0].compatible));
strncpy(of_ids[1].compatible, "simple-bus",
sizeof(of_ids[1].compatible));
return of_platform_bus_probe(NULL, of_ids, NULL);
return of_platform_populate(NULL, of_ids, NULL, NULL);
}
arch_initcall(plat_of_setup);
obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o
obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
......@@ -25,6 +25,7 @@
#include <lantiq_soc.h>
#include <xway_dma.h>
#define LTQ_DMA_ID 0x08
#define LTQ_DMA_CTRL 0x10
#define LTQ_DMA_CPOLL 0x14
#define LTQ_DMA_CS 0x18
......@@ -214,6 +215,7 @@ ltq_dma_init(struct platform_device *pdev)
{
struct clk *clk;
struct resource *res;
unsigned id;
int i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
......@@ -243,7 +245,12 @@ ltq_dma_init(struct platform_device *pdev)
ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
}
dev_info(&pdev->dev, "init done\n");
id = ltq_dma_r32(LTQ_DMA_ID);
dev_info(&pdev->dev,
"Init done - hw rev: %X, ports: %d, channels: %d\n",
id & 0x1f, (id >> 16) & 0xf, id >> 20);
return 0;
}
......
......@@ -28,17 +28,24 @@
#define RCU_RST_REQ 0x0010
/* reset status register */
#define RCU_RST_STAT 0x0014
/* vr9 gphy registers */
#define RCU_GFS_ADD0_XRX200 0x0020
#define RCU_GFS_ADD1_XRX200 0x0068
/* reboot bit */
#define RCU_RD_GPHY0_XRX200 BIT(31)
#define RCU_RD_SRST BIT(30)
#define RCU_RD_GPHY1_XRX200 BIT(29)
/* reset cause */
#define RCU_STAT_SHIFT 26
/* boot selection */
#define RCU_BOOT_SEL_SHIFT 26
#define RCU_BOOT_SEL_MASK 0x7
#define RCU_BOOT_SEL(x) ((x >> 18) & 0x7)
#define RCU_BOOT_SEL_XRX200(x) (((x >> 17) & 0xf) | ((x >> 8) & 0x10))
/* remapped base addr of the reset control unit */
static void __iomem *ltq_rcu_membase;
static struct device_node *ltq_rcu_np;
/* This function is used by the watchdog driver */
int ltq_reset_cause(void)
......@@ -52,7 +59,41 @@ EXPORT_SYMBOL_GPL(ltq_reset_cause);
unsigned char ltq_boot_select(void)
{
u32 val = ltq_rcu_r32(RCU_RST_STAT);
return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200"))
return RCU_BOOT_SEL_XRX200(val);
return RCU_BOOT_SEL(val);
}
/* reset / boot a gphy */
static struct ltq_xrx200_gphy_reset {
u32 rd;
u32 addr;
} xrx200_gphy[] = {
{RCU_RD_GPHY0_XRX200, RCU_GFS_ADD0_XRX200},
{RCU_RD_GPHY1_XRX200, RCU_GFS_ADD1_XRX200},
};
/* reset and boot a gphy. these phys only exist on xrx200 SoC */
int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr)
{
if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) {
dev_err(dev, "this SoC has no GPHY\n");
return -EINVAL;
}
if (id > 1) {
dev_err(dev, "%u is an invalid gphy id\n", id);
return -EINVAL;
}
dev_info(dev, "booting GPHY%u firmware at %X\n", id, dev_addr);
ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | xrx200_gphy[id].rd,
RCU_RST_REQ);
ltq_rcu_w32(dev_addr, xrx200_gphy[id].addr);
ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~xrx200_gphy[id].rd,
RCU_RST_REQ);
return 0;
}
/* reset a io domain for u micro seconds */
......@@ -85,14 +126,17 @@ static void ltq_machine_power_off(void)
static int __init mips_reboot_setup(void)
{
struct resource res;
struct device_node *np =
of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
ltq_rcu_np = of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
if (!ltq_rcu_np)
ltq_rcu_np = of_find_compatible_node(NULL, NULL,
"lantiq,rcu-xrx200");
/* check if all the reset register range is available */
if (!np)
if (!ltq_rcu_np)
panic("Failed to load reset resources from devicetree");
if (of_address_to_resource(np, 0, &res))
if (of_address_to_resource(ltq_rcu_np, 0, &res))
panic("Failed to get rcu memory range");
if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
......
......@@ -370,6 +370,10 @@ void __init ltq_soc_init(void)
clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
clkdev_add_pmu("1e108000.eth", NULL, 0,
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
} else if (of_machine_is_compatible("lantiq,ar9")) {
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
ltq_ar9_fpi_hz());
......
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2012 John Crispin <blogic@openwrt.org>
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of_platform.h>
#include <lantiq_soc.h>
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
static dma_addr_t xway_gphy_load(struct platform_device *pdev)
{
const struct firmware *fw;
dma_addr_t dev_addr = 0;
const char *fw_name;
void *fw_addr;
size_t size;
if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) {
dev_err(&pdev->dev, "failed to load firmware filename\n");
return 0;
}
dev_info(&pdev->dev, "requesting %s\n", fw_name);
if (request_firmware(&fw, fw_name, &pdev->dev)) {
dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name);
return 0;
}
/*
* GPHY cores need the firmware code in a persistent and contiguous
 * memory area whose start address is aligned to a 16 kB boundary
*/
size = fw->size + XRX200_GPHY_FW_ALIGN;
fw_addr = dma_alloc_coherent(&pdev->dev, size, &dev_addr, GFP_KERNEL);
if (fw_addr) {
fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN);
dev_addr = ALIGN(dev_addr, XRX200_GPHY_FW_ALIGN);
memcpy(fw_addr, fw->data, fw->size);
} else {
dev_err(&pdev->dev, "failed to alloc firmware memory\n");
}
release_firmware(fw);
return dev_addr;
}
static int __devinit xway_phy_fw_probe(struct platform_device *pdev)
{
dma_addr_t fw_addr;
struct property *pp;
unsigned char *phyids;
int i, ret = 0;
fw_addr = xway_gphy_load(pdev);
if (!fw_addr)
return -EINVAL;
pp = of_find_property(pdev->dev.of_node, "phys", NULL);
if (!pp)
return -ENOENT;
phyids = pp->value;
for (i = 0; i < pp->length && !ret; i++)
ret = xrx200_gphy_boot(&pdev->dev, phyids[i], fw_addr);
if (!ret)
mdelay(100);
return ret;
}
static const struct of_device_id xway_phy_match[] = {
{ .compatible = "lantiq,phy-xrx200" },
{},
};
MODULE_DEVICE_TABLE(of, xway_phy_match);
static struct platform_driver xway_phy_driver = {
.probe = xway_phy_fw_probe,
.driver = {
.name = "phy-xrx200",
.owner = THIS_MODULE,
.of_match_table = xway_phy_match,
},
};
module_platform_driver(xway_phy_driver);
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
MODULE_LICENSE("GPL");
......@@ -15,7 +15,7 @@ config LOONGSON1_LS1B
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_HAS_EARLY_PRINTK
select HAVE_CLK
select COMMON_CLK
endchoice
......
......@@ -7,175 +7,22 @@
* option) any later version.
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/clock.h>
#include <asm/time.h>
#include <loongson1.h>
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
struct clk *clk_get(struct device *dev, const char *name)
{
struct clk *c;
struct clk *ret = NULL;
mutex_lock(&clocks_mutex);
list_for_each_entry(c, &clocks, node) {
if (!strcmp(c->name, name)) {
ret = c;
break;
}
}
mutex_unlock(&clocks_mutex);
return ret;
}
EXPORT_SYMBOL(clk_get);
int clk_enable(struct clk *clk)
{
return 0;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
}
EXPORT_SYMBOL(clk_disable);
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
void clk_put(struct clk *clk)
{
}
EXPORT_SYMBOL(clk_put);
static void pll_clk_init(struct clk *clk)
{
u32 pll;
pll = __raw_readl(LS1X_CLK_PLL_FREQ);
clk->rate = (12 + (pll & 0x3f)) * 33 / 2
+ ((pll >> 8) & 0x3ff) * 33 / 1024 / 2;
clk->rate *= 1000000;
}
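A worked instance of the formula above (illustrative register value only): with LS1X_CLK_PLL_FREQ reading 0x0020, the integer field is 0x20 and the fractional field is 0:
	/* pll = 0x0020: int = 32, frac = 0
	 *   rate = (12 + 32) * 33 / 2 + 0 * 33 / 1024 / 2 = 726
	 *   rate *= 1000000  ->  726 MHz
	 */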
static void cpu_clk_init(struct clk *clk)
{
u32 pll, ctrl;
pll = clk_get_rate(clk->parent);
ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_CPU;
clk->rate = pll / (ctrl >> DIV_CPU_SHIFT);
}
static void ddr_clk_init(struct clk *clk)
{
u32 pll, ctrl;
pll = clk_get_rate(clk->parent);
ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DDR;
clk->rate = pll / (ctrl >> DIV_DDR_SHIFT);
}
static void dc_clk_init(struct clk *clk)
{
u32 pll, ctrl;
pll = clk_get_rate(clk->parent);
ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DC;
clk->rate = pll / (ctrl >> DIV_DC_SHIFT);
}
static struct clk_ops pll_clk_ops = {
.init = pll_clk_init,
};
static struct clk_ops cpu_clk_ops = {
.init = cpu_clk_init,
};
static struct clk_ops ddr_clk_ops = {
.init = ddr_clk_init,
};
static struct clk_ops dc_clk_ops = {
.init = dc_clk_init,
};
static struct clk pll_clk = {
.name = "pll",
.ops = &pll_clk_ops,
};
static struct clk cpu_clk = {
.name = "cpu",
.parent = &pll_clk,
.ops = &cpu_clk_ops,
};
static struct clk ddr_clk = {
.name = "ddr",
.parent = &pll_clk,
.ops = &ddr_clk_ops,
};
static struct clk dc_clk = {
.name = "dc",
.parent = &pll_clk,
.ops = &dc_clk_ops,
};
int clk_register(struct clk *clk)
{
mutex_lock(&clocks_mutex);
list_add(&clk->node, &clocks);
if (clk->ops->init)
clk->ops->init(clk);
mutex_unlock(&clocks_mutex);
return 0;
}
EXPORT_SYMBOL(clk_register);
static struct clk *ls1x_clks[] = {
&pll_clk,
&cpu_clk,
&ddr_clk,
&dc_clk,
};
int __init ls1x_clock_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ls1x_clks); i++)
clk_register(ls1x_clks[i]);
return 0;
}
#include <platform.h>
void __init plat_time_init(void)
{
struct clk *clk;
/* Initialize LS1X clocks */
ls1x_clock_init();
ls1x_clk_init();
/* setup mips r4k timer */
clk = clk_get(NULL, "cpu");
if (IS_ERR(clk))
panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
mips_hpt_frequency = clk_get_rate(clk) / 2;
}
......@@ -42,16 +42,17 @@ struct platform_device ls1x_uart_device = {
},
};
void __init ls1x_serial_setup(void)
void __init ls1x_serial_setup(struct platform_device *pdev)
{
struct clk *clk;
struct plat_serial8250_port *p;
clk = clk_get(NULL, "dc");
clk = clk_get(NULL, pdev->name);
if (IS_ERR(clk))
panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
panic("unable to get %s clock, err=%ld",
pdev->name, PTR_ERR(clk));
for (p = ls1x_serial8250_port; p->flags != 0; ++p)
for (p = pdev->dev.platform_data; p->flags != 0; ++p)
p->uartclk = clk_get_rate(clk);
}
......@@ -70,7 +71,6 @@ static struct resource ls1x_eth0_resources[] = {
};
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.bus_id = 0,
.phy_mask = 0,
};
......
......@@ -9,9 +9,6 @@
#include <platform.h>
#include <linux/serial_8250.h>
#include <loongson1.h>
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_device,
&ls1x_eth0_device,
......@@ -23,7 +20,7 @@ static int __init ls1b_platform_init(void)
{
int err;
ls1x_serial_setup();
ls1x_serial_setup(&ls1x_uart_device);
err = platform_add_devices(ls1b_platform_devices,
ARRAY_SIZE(ls1b_platform_devices));
......
......@@ -1333,10 +1333,10 @@ static int __init cca_setup(char *str)
{
get_option(&str, &cca);
return 1;
return 0;
}
__setup("cca=", cca_setup);
early_param("cca", cca_setup);
static void __cpuinit coherency_setup(void)
{
......@@ -1386,10 +1386,10 @@ static int __init setcoherentio(char *str)
{
coherentio = 1;
return 1;
return 0;
}
__setup("coherentio", setcoherentio);
early_param("coherentio", setcoherentio);
#endif
static void __cpuinit r4k_cache_error_setup(void)
......
......@@ -183,7 +183,9 @@ UASM_L_LA(_tlb_huge_update)
static int __cpuinitdata hazard_instance;
static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
static void __cpuinit uasm_bgezl_hazard(u32 **p,
struct uasm_reloc **r,
int instance)
{
switch (instance) {
case 0 ... 7:
......@@ -194,7 +196,9 @@ static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
}
}
static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
u32 **p,
int instance)
{
switch (instance) {
case 0 ... 7:
......
......@@ -9,6 +9,34 @@ config DT_XLP_EVP
This DTB will be used if the firmware does not pass in a DTB
pointer to the kernel. The corresponding DTS file is at
arch/mips/netlogic/dts/xlp_evp.dts
config NLM_MULTINODE
bool "Support for multi-chip boards"
depends on NLM_XLP_BOARD
default n
help
Add support for boards with 2 or 4 XLPs connected over ICI.
if NLM_MULTINODE
choice
prompt "Number of XLPs on the board"
default NLM_MULTINODE_2
help
In the multi-node case, specify the number of SoCs on the board.
config NLM_MULTINODE_2
bool "Dual-XLP board"
help
Support boards with up to two XLPs connected over ICI.
config NLM_MULTINODE_4
bool "Quad-XLP board"
help
Support boards with up to four XLPs connected over ICI.
endchoice
endif
endif
config NLM_COMMON
......
......@@ -36,7 +36,6 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/irq.h>
......@@ -59,68 +58,70 @@
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/fmn.h>
#else
#error "Unknown CPU"
#endif
/*
* These are the routines that handle all the low level interrupt stuff.
* Actions handled here are: initialization of the interrupt map, requesting of
* interrupt lines by handlers, dispatching of interrupts to handlers, probing
* for interrupt lines
*/
/* Globals */
static uint64_t nlm_irq_mask;
static DEFINE_SPINLOCK(nlm_pic_lock);
#ifdef CONFIG_SMP
#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
(1ULL << IRQ_IPI_SMP_RESCHEDULE))
#else
#define SMP_IRQ_MASK 0
#endif
#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
(1ull << IRQ_FMN))
struct nlm_pic_irq {
void (*extra_ack)(struct irq_data *);
struct nlm_soc_info *node;
int picirq;
int irt;
int flags;
};
static void xlp_pic_enable(struct irq_data *d)
{
unsigned long flags;
int irt;
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
irt = nlm_irq_to_irt(d->irq);
if (irt == -1)
return;
spin_lock_irqsave(&nlm_pic_lock, flags);
nlm_pic_enable_irt(nlm_pic_base, irt);
spin_unlock_irqrestore(&nlm_pic_lock, flags);
BUG_ON(!pd);
spin_lock_irqsave(&pd->node->piclock, flags);
nlm_pic_enable_irt(pd->node->picbase, pd->irt);
spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_disable(struct irq_data *d)
{
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
unsigned long flags;
int irt;
irt = nlm_irq_to_irt(d->irq);
if (irt == -1)
return;
spin_lock_irqsave(&nlm_pic_lock, flags);
nlm_pic_disable_irt(nlm_pic_base, irt);
spin_unlock_irqrestore(&nlm_pic_lock, flags);
BUG_ON(!pd);
spin_lock_irqsave(&pd->node->piclock, flags);
nlm_pic_disable_irt(pd->node->picbase, pd->irt);
spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_mask_ack(struct irq_data *d)
{
uint64_t mask = 1ull << d->irq;
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
uint64_t mask = 1ull << pd->picirq;
write_c0_eirr(mask); /* ack by writing EIRR */
}
static void xlp_pic_unmask(struct irq_data *d)
{
void *hd = irq_data_get_irq_handler_data(d);
int irt;
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
irt = nlm_irq_to_irt(d->irq);
if (irt == -1)
if (!pd)
return;
if (hd) {
void (*extra_ack)(void *) = hd;
extra_ack(d);
}
if (pd->extra_ack)
pd->extra_ack(d);
/* Ack is a single write, no need to lock */
nlm_pic_ack(nlm_pic_base, irt);
nlm_pic_ack(pd->node->picbase, pd->irt);
}
static struct irq_chip xlp_pic = {
......@@ -174,64 +175,108 @@ struct irq_chip nlm_cpu_intr = {
.irq_eoi = cpuintr_ack,
};
void __init init_nlm_common_irqs(void)
static void __init nlm_init_percpu_irqs(void)
{
int i, irq, irt;
int i;
for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
#ifdef CONFIG_SMP
irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
nlm_smp_function_ipi_handler);
irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
nlm_smp_resched_ipi_handler);
nlm_irq_mask |=
((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
#endif
}
void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
{
struct nlm_pic_irq *pic_data;
int xirq;
xirq = nlm_irq_to_xirq(node, irq);
pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
BUG_ON(pic_data == NULL);
pic_data->irt = irt;
pic_data->picirq = picirq;
pic_data->node = nlm_get_node(node);
irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
irq_set_handler_data(xirq, pic_data);
}
void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
{
struct nlm_pic_irq *pic_data;
int xirq;
xirq = nlm_irq_to_xirq(node, irq);
pic_data = irq_get_handler_data(xirq);
pic_data->extra_ack = xack;
}
for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) {
irt = nlm_irq_to_irt(irq);
static void nlm_init_node_irqs(int node)
{
int i, irt;
uint64_t irqmask;
struct nlm_soc_info *nodep;
pr_info("Init IRQ for node %d\n", node);
nodep = nlm_get_node(node);
irqmask = PERCPU_IRQ_MASK;
for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
irt = nlm_irq_to_irt(i);
if (irt == -1)
continue;
nlm_irq_mask |= (1ULL << irq);
nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
nlm_setup_pic_irq(node, i, i, irt);
/* set interrupts to first cpu in node */
nlm_pic_init_irt(nodep->picbase, irt, i,
node * NLM_CPUS_PER_NODE);
irqmask |= (1ull << i);
}
nlm_irq_mask |= (1ULL << IRQ_TIMER);
nodep->irqmask = irqmask;
}
void __init arch_init_irq(void)
{
/* Initialize the irq descriptors */
init_nlm_common_irqs();
write_c0_eimr(nlm_irq_mask);
nlm_init_percpu_irqs();
nlm_init_node_irqs(0);
write_c0_eimr(nlm_current_node()->irqmask);
#if defined(CONFIG_CPU_XLR)
nlm_setup_fmn_irq();
#endif
}
void __cpuinit nlm_smp_irq_init(void)
void nlm_smp_irq_init(int hwcpuid)
{
/* set interrupt mask for non-zero cpus */
write_c0_eimr(nlm_irq_mask);
int node, cpu;
node = hwcpuid / NLM_CPUS_PER_NODE;
cpu = hwcpuid % NLM_CPUS_PER_NODE;
if (cpu == 0 && node != 0)
nlm_init_node_irqs(node);
write_c0_eimr(nlm_current_node()->irqmask);
}
asmlinkage void plat_irq_dispatch(void)
{
uint64_t eirr;
int i;
int i, node;
node = nlm_nodeid();
eirr = read_c0_eirr() & read_c0_eimr();
if (eirr & (1 << IRQ_TIMER)) {
do_IRQ(IRQ_TIMER);
return;
}
i = __ilog2_u64(eirr);
if (i == -1)
return;
do_IRQ(i);
/* per-CPU IRQs don't need translation */
if (eirr & PERCPU_IRQ_MASK) {
do_IRQ(i);
return;
}
/* top level irq handling */
do_IRQ(nlm_irq_to_xirq(node, i));
}
......@@ -59,12 +59,17 @@
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
int cpu = cpu_logical_map(logical_cpu);
int cpu, node;
uint64_t picbase;
cpu = cpu_logical_map(logical_cpu);
node = cpu / NLM_CPUS_PER_NODE;
picbase = nlm_get_node(node)->picbase;
if (action & SMP_CALL_FUNCTION)
nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
if (action & SMP_RESCHEDULE_YOURSELF)
nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
......@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
void nlm_early_init_secondary(int cpu)
{
change_c0_config(CONF_CM_CMASK, 0x3);
write_c0_ebase((uint32_t)nlm_common_ebase);
#ifdef CONFIG_CPU_XLP
if (hard_smp_processor_id() % 4 == 0)
/* mmu init, once per core */
if (cpu % NLM_THREADS_PER_CORE == 0)
xlp_mmu_init();
#endif
write_c0_ebase(nlm_current_node()->ebase);
}
/*
......@@ -108,8 +114,12 @@ void nlm_early_init_secondary(int cpu)
*/
static void __cpuinit nlm_init_secondary(void)
{
current_cpu_data.core = hard_smp_processor_id() / 4;
nlm_smp_irq_init();
int hwtid;
hwtid = hard_smp_processor_id();
current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
nlm_percpu_init(hwtid);
nlm_smp_irq_init(hwtid);
}
void nlm_prepare_cpus(unsigned int max_cpus)
......@@ -120,9 +130,6 @@ void nlm_prepare_cpus(unsigned int max_cpus)
void nlm_smp_finish(void)
{
#ifdef notyet
nlm_common_msgring_cpu_init();
#endif
local_irq_enable();
}
......@@ -142,27 +149,27 @@ cpumask_t phys_cpu_present_map;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long)task_thread_info(idle);
unsigned long sp = (unsigned long)__KSTK_TOS(idle);
int cpu = cpu_logical_map(logical_cpu);
int cpu, node;
nlm_next_sp = sp;
nlm_next_gp = gp;
cpu = cpu_logical_map(logical_cpu);
node = cpu / NLM_CPUS_PER_NODE;
nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
nlm_next_gp = (unsigned long)task_thread_info(idle);
/* barrier */
/* barrier for sp/gp store above */
__sync();
nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
}
void __init nlm_smp_setup(void)
{
unsigned int boot_cpu;
int num_cpus, i;
int num_cpus, i, ncore;
boot_cpu = hard_smp_processor_id();
cpus_clear(phys_cpu_present_map);
cpumask_clear(&phys_cpu_present_map);
cpu_set(boot_cpu, phys_cpu_present_map);
cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
set_cpu_possible(0, true);
......@@ -174,7 +181,7 @@ void __init nlm_smp_setup(void)
* it is only set for ASPs (see smpboot.S)
*/
if (nlm_cpu_ready[i]) {
cpu_set(i, phys_cpu_present_map);
cpumask_set_cpu(i, &phys_cpu_present_map);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
set_cpu_possible(num_cpus, true);
......@@ -182,20 +189,28 @@ void __init nlm_smp_setup(void)
}
}
/* check the cores we have woken up */
for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
ncore += hweight32(nlm_get_node(i)->coremask);
pr_info("Phys CPU present map: %lx, possible map %lx\n",
(unsigned long)phys_cpu_present_map.bits[0],
(unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
pr_info("Detected %i Slave CPU(s)\n", num_cpus);
pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
nlm_threads_per_core, num_cpus);
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
static int nlm_parse_cpumask(u32 cpu_mask)
static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
uint32_t core0_thr_mask, core_thr_mask;
int threadmode, i;
int threadmode, i, j;
core0_thr_mask = cpu_mask & 0xf;
core0_thr_mask = 0;
for (i = 0; i < NLM_THREADS_PER_CORE; i++)
if (cpumask_test_cpu(i, wakeup_mask))
core0_thr_mask |= (1 << i);
switch (core0_thr_mask) {
case 1:
nlm_threads_per_core = 1;
......@@ -214,25 +229,23 @@ static int nlm_parse_cpumask(u32 cpu_mask)
}
/* Verify the other cores' CPU masks */
nlm_coremask = 1;
nlm_cpumask = core0_thr_mask;
for (i = 1; i < 8; i++) {
core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
if (core_thr_mask) {
if (core_thr_mask != core0_thr_mask)
for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
core_thr_mask = 0;
for (j = 0; j < NLM_THREADS_PER_CORE; j++)
if (cpumask_test_cpu(i + j, wakeup_mask))
core_thr_mask |= (1 << j);
if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
goto unsupp;
nlm_coremask |= 1 << i;
nlm_cpumask |= core0_thr_mask << (4 * i);
}
}
return threadmode;
unsupp:
panic("Unsupported CPU mask %x\n", cpu_mask);
panic("Unsupported CPU mask %lx\n",
(unsigned long)cpumask_bits(wakeup_mask)[0]);
return 0;
}
int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
int __cpuinit nlm_wakeup_secondary_cpus(void)
{
unsigned long reset_vec;
char *reset_data;
......@@ -244,7 +257,7 @@ int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
(nlm_reset_entry_end - nlm_reset_entry));
/* verify the mask and setup core config variables */
threadmode = nlm_parse_cpumask(wakeup_mask);
threadmode = nlm_parse_cpumask(&nlm_cpumask);
/* Setup CPU init parameters */
reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
......
......@@ -61,7 +61,7 @@
li t0, LSU_DEFEATURE
mfcr t1, t0
lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
or t1, t1, t2
#ifdef XLP_AX_WORKAROUND
li t2, ~0xe /* S1RCM */
......@@ -186,7 +186,7 @@ EXPORT(nlm_boot_siblings)
* jump to the secondary wait function.
*/
mfc0 v0, CP0_EBASE, 1
andi v0, 0x7f /* v0 <- node/core */
andi v0, 0x3ff /* v0 <- node/core */
/* Init MMU in the first thread after changing THREAD_MODE
* register (Ax Errata?)
......@@ -263,6 +263,8 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
PTR_L gp, 0(t1)
/* a0 has the processor id */
mfc0 a0, CP0_EBASE, 1
andi a0, 0x3ff /* a0 <- node/core */
PTR_LA t0, nlm_early_init_secondary
jalr t0
nop
......
......@@ -40,23 +40,23 @@
#include <asm/mipsregs.h>
#include <asm/time.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/sys.h>
/* These addresses are computed by the nlm_hal_init() */
uint64_t nlm_io_base;
uint64_t nlm_sys_base;
uint64_t nlm_pic_base;
/* Main initialization */
void nlm_hal_init(void)
void nlm_node_init(int node)
{
nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */
nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */
struct nlm_soc_info *nodep;
nodep = nlm_get_node(node);
nodep->sysbase = nlm_get_sys_regbase(node);
nodep->picbase = nlm_get_pic_regbase(node);
nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
spin_lock_init(&nodep->piclock);
}
int nlm_irq_to_irt(int irq)
......@@ -100,52 +100,15 @@ int nlm_irq_to_irt(int irq)
}
}
int nlm_irt_to_irq(int irt)
{
switch (irt) {
case PIC_IRT_UART_0_INDEX:
return PIC_UART_0_IRQ;
case PIC_IRT_UART_1_INDEX:
return PIC_UART_1_IRQ;
case PIC_IRT_PCIE_LINK_0_INDEX:
return PIC_PCIE_LINK_0_IRQ;
case PIC_IRT_PCIE_LINK_1_INDEX:
return PIC_PCIE_LINK_1_IRQ;
case PIC_IRT_PCIE_LINK_2_INDEX:
return PIC_PCIE_LINK_2_IRQ;
case PIC_IRT_PCIE_LINK_3_INDEX:
return PIC_PCIE_LINK_3_IRQ;
case PIC_IRT_EHCI_0_INDEX:
return PIC_EHCI_0_IRQ;
case PIC_IRT_EHCI_1_INDEX:
return PIC_EHCI_1_IRQ;
case PIC_IRT_OHCI_0_INDEX:
return PIC_OHCI_0_IRQ;
case PIC_IRT_OHCI_1_INDEX:
return PIC_OHCI_1_IRQ;
case PIC_IRT_OHCI_2_INDEX:
return PIC_OHCI_2_IRQ;
case PIC_IRT_OHCI_3_INDEX:
return PIC_OHCI_3_IRQ;
case PIC_IRT_MMC_INDEX:
return PIC_MMC_IRQ;
case PIC_IRT_I2C_0_INDEX:
return PIC_I2C_0_IRQ;
case PIC_IRT_I2C_1_INDEX:
return PIC_I2C_1_IRQ;
default:
return -1;
}
}
unsigned int nlm_get_core_frequency(int core)
unsigned int nlm_get_core_frequency(int node, int core)
{
unsigned int pll_divf, pll_divr, dfs_div, ext_div;
unsigned int rstval, dfsval, denom;
uint64_t num;
uint64_t num, sysbase;
rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG);
dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE);
sysbase = nlm_get_node(node)->sysbase;
rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
pll_divf = ((rstval >> 10) & 0x7f) + 1;
pll_divr = ((rstval >> 8) & 0x3) + 1;
ext_div = ((rstval >> 30) & 0x3) + 1;
......@@ -159,5 +122,5 @@ unsigned int nlm_get_core_frequency(int core)
unsigned int nlm_get_cpu_frequency(void)
{
return nlm_get_core_frequency(0);
return nlm_get_core_frequency(0, 0);
}
......@@ -52,26 +52,40 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
unsigned long nlm_common_ebase = 0x0;
/* default to uniprocessor */
uint32_t nlm_coremask = 1, nlm_cpumask = 1;
int nlm_threads_per_core = 1;
uint64_t nlm_io_base;
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
unsigned int nlm_threads_per_core;
extern u32 __dtb_start[];
static void nlm_linux_exit(void)
{
nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1);
uint64_t sysbase = nlm_get_node(0)->sysbase;
nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
for ( ; ; )
cpu_wait();
}
void __init plat_mem_setup(void)
{
void *fdtp;
panic_timeout = 5;
_machine_restart = (void (*)(char *))nlm_linux_exit;
_machine_halt = nlm_linux_exit;
pm_power_off = nlm_linux_exit;
/*
* If no FDT pointer is passed in, use the built-in FDT.
* device_tree_init() does not handle CKSEG0 pointers in
* 64-bit, so convert pointer.
*/
fdtp = (void *)(long)fw_arg0;
if (!fdtp)
fdtp = __dtb_start;
fdtp = phys_to_virt(__pa(fdtp));
early_init_devtree(fdtp);
}
const char *get_system_type(void)
......@@ -94,27 +108,19 @@ void xlp_mmu_init(void)
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
void __init prom_init(void)
void nlm_percpu_init(int hwcpuid)
{
void *fdtp;
}
void __init prom_init(void)
{
nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
xlp_mmu_init();
nlm_hal_init();
/*
* If no FDT pointer is passed in, use the built-in FDT.
* device_tree_init() does not handle CKSEG0 pointers in
* 64-bit, so convert pointer.
*/
fdtp = (void *)(long)fw_arg0;
if (!fdtp)
fdtp = __dtb_start;
fdtp = phys_to_virt(__pa(fdtp));
early_init_devtree(fdtp);
nlm_node_init(0);
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
nlm_wakeup_secondary_cpus(0xffffffff);
cpumask_setall(&nlm_cpumask);
nlm_wakeup_secondary_cpus();
/* update TLB size after waking up threads */
current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
......
......@@ -51,45 +51,72 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
static void xlp_enable_secondary_cores(void)
static int xlp_wakeup_core(uint64_t sysbase, int core)
{
uint32_t core, value, coremask, syscoremask;
uint32_t coremask, value;
int count;
/* read cores in reset from SYS block */
syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
coremask = (1 << core);
/* update the user-specified coremask */
nlm_coremask = nlm_coremask & (syscoremask | 1);
/* Enable CPU clock */
value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
value &= ~coremask;
nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
for (core = 1; core < 8; core++) {
coremask = 1 << core;
if ((nlm_coremask & coremask) == 0)
continue;
/* Remove CPU Reset */
value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
value &= ~coremask;
nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
/* Enable CPU clock */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value);
/* Poll for CPU to mark itself coherent */
count = 100000;
do {
value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
} while ((value & coremask) != 0 && --count > 0);
/* Remove CPU Reset */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value);
return count != 0;
}
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
struct nlm_soc_info *nodep;
uint64_t syspcibase;
uint32_t syscoremask;
int core, n, cpu;
for (n = 0; n < NLM_NR_NODES; n++) {
syspcibase = nlm_get_sys_pcibase(n);
if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
break;
/* read cores in reset from SYS and account for boot cpu */
nlm_node_init(n);
nodep = nlm_get_node(n);
syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
if (n == 0)
syscoremask |= 1;
for (core = 0; core < NLM_CORES_PER_NODE; core++) {
/* see if the core exists */
if ((syscoremask & (1 << core)) == 0)
continue;
/* Poll for CPU to mark itself coherent */
count = 100000;
do {
value = nlm_read_sys_reg(nlm_sys_base,
SYS_CPU_NONCOHERENT_MODE);
} while ((value & coremask) != 0 && count-- > 0);
/* see if at least the first thread is enabled */
cpu = (n * NLM_CORES_PER_NODE + core)
* NLM_THREADS_PER_CORE;
if (!cpumask_test_cpu(cpu, wakeup_mask))
continue;
if (count == 0)
pr_err("Failed to enable core %d\n", core);
/* wake up the core */
if (xlp_wakeup_core(nodep->sysbase, core))
nodep->coremask |= 1u << core;
else
pr_err("Failed to enable core %d\n", core);
}
}
}
void xlp_wakeup_secondary_cpus(void)
void xlp_wakeup_secondary_cpus()
{
/*
* In case of u-boot, the secondaries are in reset
......@@ -98,5 +125,5 @@ void xlp_wakeup_secondary_cpus(void)
xlp_boot_core0_siblings();
/* now get other cores out of reset */
xlp_enable_secondary_cores();
xlp_enable_secondary_cores(&nlm_cpumask);
}
obj-y += setup.o platform.o platform-flash.o
obj-$(CONFIG_SMP) += wakeup.o
obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
obj-$(CONFIG_SMP) += wakeup.o
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/cpu-info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/mipsregs.h>
#include <asm/netlogic/xlr/fmn.h>
#include <asm/netlogic/xlr/xlr.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/haldefs.h>
struct xlr_board_fmn_config xlr_board_fmn_config;
static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
{
int bkt;
pr_info("Bucket size :\n");
pr_info("Station\t: Size\n");
for (bkt = 0; bkt < 16; bkt++)
pr_info(" %d %d %d %d %d %d %d %d\n",
xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
pr_info("\n");
pr_info("Credits distribution :\n");
pr_info("Station\t: Size\n");
for (bkt = 0; bkt < 16; bkt++)
pr_info(" %d %d %d %d %d %d %d %d\n",
fmn_info->credit_config[(bkt * 8) + 0],
fmn_info->credit_config[(bkt * 8) + 1],
fmn_info->credit_config[(bkt * 8) + 2],
fmn_info->credit_config[(bkt * 8) + 3],
fmn_info->credit_config[(bkt * 8) + 4],
fmn_info->credit_config[(bkt * 8) + 5],
fmn_info->credit_config[(bkt * 8) + 6],
fmn_info->credit_config[(bkt * 8) + 7]);
pr_info("\n");
}
static void check_credit_distribution(void)
{
struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
int bkt, n, total_credits, ncores;
ncores = hweight32(nlm_current_node()->coremask);
for (bkt = 0; bkt < 128; bkt++) {
total_credits = 0;
for (n = 0; n < ncores; n++)
total_credits += cfg->cpu[n].credit_config[bkt];
total_credits += cfg->gmac[0].credit_config[bkt];
total_credits += cfg->gmac[1].credit_config[bkt];
total_credits += cfg->dma.credit_config[bkt];
total_credits += cfg->cmp.credit_config[bkt];
total_credits += cfg->sae.credit_config[bkt];
total_credits += cfg->xgmac[0].credit_config[bkt];
total_credits += cfg->xgmac[1].credit_config[bkt];
if (total_credits > cfg->bucket_size[bkt])
pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
bkt, total_credits, cfg->bucket_size[bkt]);
}
pr_info("Credit distribution complete.\n");
}
/**
* Configure bucket size and credits for a device. 'size' is the size of
* the buckets for the device. This size is distributed among all the CPUs
* so that all of them can send messages to the device.
*
* The device is also given 'cpu_credits' to send messages to the CPUs
*
* @dev_info: FMN information structure for each device
* @start_stn_id: Starting station id of dev_info
* @end_stn_id: End station id of dev_info
* @num_buckets: Total number of buckets for dev_info
* @cpu_credits: Credits allowed to the CPUs for each device pointed to by dev_info
* @size: Size of each bucket in the device station
*/
static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
int end_stn_id, int num_buckets, int cpu_credits, int size)
{
int i, j, num_core, n, credits_per_cpu;
struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
num_core = hweight32(nlm_current_node()->coremask);
dev_info->num_buckets = num_buckets;
dev_info->start_stn_id = start_stn_id;
dev_info->end_stn_id = end_stn_id;
n = num_core;
if (num_core == 3)
n = 4;
for (i = start_stn_id; i <= end_stn_id; i++) {
xlr_board_fmn_config.bucket_size[i] = size;
/* Divide the device credits equally among the CPUs */
credits_per_cpu = size / n;
for (j = 0; j < num_core; j++)
cpu[j].credit_config[i] = credits_per_cpu;
/* credits left to distribute */
credits_per_cpu = size - (credits_per_cpu * num_core);
/* distribute the remaining credits (if any), among cores */
for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
cpu[j].credit_config[i] += 4;
credits_per_cpu -= 4;
}
}
/* Give the device 'cpu_credits' credits for each CPU bucket */
for (i = 0; i < num_core; i++) {
for (j = 0; j < FMN_CORE_NBUCKETS; j++)
dev_info->credit_config[(i * 8) + j] = cpu_credits;
}
}
/*
* Each core has 256 slots and 8 buckets;
* configure each of the 8 buckets with 32 slots
*/
static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
{
int i, j;
for (i = 0; i < num_core; i++) {
cpu[i].start_stn_id = (8 * i);
cpu[i].end_stn_id = (8 * i + 8);
for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
xlr_board_fmn_config.bucket_size[j] = 32;
}
}
/**
* Set up the FMN details for each device according to the devices available
* in each variant of the XLR/XLS processor
*/
void xlr_board_info_setup(void)
{
struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
int processor_id, num_core;
num_core = hweight32(nlm_current_node()->coremask);
processor_id = read_c0_prid() & 0xff00;
setup_cpu_fmninfo(cpu, num_core);
switch (processor_id) {
case PRID_IMP_NETLOGIC_XLS104:
case PRID_IMP_NETLOGIC_XLS108:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 16, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 8, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 8, 128);
break;
case PRID_IMP_NETLOGIC_XLS204:
case PRID_IMP_NETLOGIC_XLS208:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 16, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 8, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 8, 128);
break;
case PRID_IMP_NETLOGIC_XLS404:
case PRID_IMP_NETLOGIC_XLS408:
case PRID_IMP_NETLOGIC_XLS404B:
case PRID_IMP_NETLOGIC_XLS408B:
case PRID_IMP_NETLOGIC_XLS416B:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 8, 32);
setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
FMN_STNID_GMAC1_TX3, 8, 8, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 4, 64);
setup_fmn_cc(cmp, FMN_STNID_CMP_0,
FMN_STNID_CMP_3, 4, 4, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 8, 128);
break;
case PRID_IMP_NETLOGIC_XLS412B:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 8, 32);
setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
FMN_STNID_GMAC1_TX3, 8, 8, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 4, 64);
setup_fmn_cc(cmp, FMN_STNID_CMP_0,
FMN_STNID_CMP_3, 4, 4, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 8, 128);
break;
case PRID_IMP_NETLOGIC_XLR308:
case PRID_IMP_NETLOGIC_XLR308C:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 16, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 8, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 4, 128);
break;
case PRID_IMP_NETLOGIC_XLR532:
case PRID_IMP_NETLOGIC_XLR532C:
case PRID_IMP_NETLOGIC_XLR516C:
case PRID_IMP_NETLOGIC_XLR508C:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 16, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 8, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 4, 128);
break;
case PRID_IMP_NETLOGIC_XLR732:
case PRID_IMP_NETLOGIC_XLR716:
setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
FMN_STNID_XMAC0_15_TX, 8, 0, 32);
setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
FMN_STNID_XMAC1_15_TX, 8, 0, 32);
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 24, 32);
setup_fmn_cc(dma, FMN_STNID_DMA_0,
FMN_STNID_DMA_3, 4, 4, 64);
setup_fmn_cc(sae, FMN_STNID_SEC0,
FMN_STNID_SEC1, 2, 4, 128);
break;
default:
pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
pr_err("Error: Cannot initialize FMN credits.\n");
}
check_credit_distribution();
#if 0 /* debug */
print_credit_config(&cpu[0]);
print_credit_config(&gmac[0]);
#endif
}
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/irqreturn.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/mipsregs.h>
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/xlr/fmn.h>
#include <asm/netlogic/common.h>
#define COP2_CC_INIT_CPU_DEST(dest, conf) \
do { \
nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
} while (0)
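/*
 * Editor's note: for illustration, COP2_CC_INIT_CPU_DEST(3, conf) expands to
 * eight calls of the form
 *
 *	nlm_write_c2_cc3(0, conf[24]);
 *	...
 *	nlm_write_c2_cc3(7, conf[31]);
 *
 * The destination index is pasted into the function name by the ## operator,
 * which is why the first macro argument must be a literal constant (see the
 * comment in xlr_percpu_fmn_init() below).
 */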
struct fmn_message_handler {
void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
void *arg;
} msg_handlers[128];
/*
* FMN interrupt handler. We configure the FMN so that any messages in
* any of the CPU buckets will trigger an interrupt on the CPU.
* The message can be from any device on the FMN (like NAE/SAE/DMA).
* The source station id is used to figure out which of the registered
* handlers has to be called.
*/
static irqreturn_t fmn_message_handler(int irq, void *data)
{
struct fmn_message_handler *hndlr;
int bucket, rv;
int size = 0, code = 0, src_stnid = 0;
struct nlm_fmn_msg msg;
uint32_t mflags, bkt_status;
mflags = nlm_cop2_enable();
/* Disable message ring interrupt */
nlm_fmn_setup_intr(irq, 0);
while (1) {
/* 8 buckets per core; in status bits [24:31] each bit represents one bucket.
* A bit is zero if the bucket is not empty. */
bkt_status = (nlm_read_c2_status() >> 24) & 0xff;
if (bkt_status == 0xff)
break;
for (bucket = 0; bucket < 8; bucket++) {
/* Continue on empty bucket */
if (bkt_status & (1 << bucket))
continue;
rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
&msg);
if (rv != 0)
continue;
hndlr = &msg_handlers[src_stnid];
if (hndlr->action == NULL)
pr_warn("No msgring handler for stnid %d\n",
src_stnid);
else {
nlm_cop2_restore(mflags);
hndlr->action(bucket, src_stnid, size, code,
&msg, hndlr->arg);
mflags = nlm_cop2_enable();
}
}
};
/* Enable message ring intr, to any thread in core */
nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
nlm_cop2_restore(mflags);
return IRQ_HANDLED;
}
struct irqaction fmn_irqaction = {
.handler = fmn_message_handler,
.flags = IRQF_PERCPU,
.name = "fmn",
};
void xlr_percpu_fmn_init(void)
{
struct xlr_fmn_info *cpu_fmn_info;
int *bucket_sizes;
uint32_t flags;
int id;
BUG_ON(nlm_thread_id() != 0);
id = nlm_core_id();
bucket_sizes = xlr_board_fmn_config.bucket_size;
cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
flags = nlm_cop2_enable();
/* Setup bucket sizes for the core. */
nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
/*
* For sending FMN messages, we need credits on the destination
* bucket. Program the credits this core has on the 128 possible
* destination buckets.
* We cannot use a loop here, because the first argument has
* to be a constant integer value.
*/
COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
/* enable FMN interrupts on this CPU */
nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
nlm_cop2_restore(flags);
}
/*
* Register an FMN message handler for a range of source station ids
* @start_stnid: first source station id
* @end_stnid: last source station id
* @action: Handler function pointer
*/
int nlm_register_fmn_handler(int start_stnid, int end_stnid,
void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
void *arg)
{
int sstnid;
for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
msg_handlers[sstnid].arg = arg;
smp_wmb();
msg_handlers[sstnid].action = action;
}
pr_debug("Registered FMN msg handler for stnid %d-%d\n",
start_stnid, end_stnid);
return 0;
}
void nlm_setup_fmn_irq(void)
{
uint32_t flags;
/* setup irq only once */
setup_irq(IRQ_FMN, &fmn_irqaction);
flags = nlm_cop2_enable();
nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
nlm_cop2_restore(flags);
}
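/*
 * Editor's note: a hypothetical sketch of how a driver could hook into the
 * message ring through nlm_register_fmn_handler() above.  The callback
 * signature mirrors struct fmn_message_handler; the station id range and the
 * body of example_fmn_handler() are illustrative only.
 */
static void example_fmn_handler(int bucket, int src_stnid, int size, int code,
				struct nlm_fmn_msg *msg, void *arg)
{
	/* invoked from fmn_message_handler() for messages from this station */
	pr_debug("FMN message from stnid %d, size %d, code %d\n",
		 src_stnid, size, code);
}

static void example_fmn_register(void)
{
	/* claim messages from a hypothetical station id range */
	nlm_register_fmn_handler(FMN_STNID_DMA_0, FMN_STNID_DMA_3,
				 example_fmn_handler, NULL);
}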
......@@ -49,16 +49,15 @@
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/gpio.h>
#include <asm/netlogic/xlr/fmn.h>
uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
uint64_t nlm_pic_base;
struct psb_info nlm_prom_info;
unsigned long nlm_common_ebase = 0x0;
/* default to uniprocessor */
uint32_t nlm_coremask = 1, nlm_cpumask = 1;
int nlm_threads_per_core = 1;
unsigned int nlm_threads_per_core = 1;
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
static void __init nlm_early_serial_setup(void)
{
......@@ -113,6 +112,12 @@ void __init prom_free_prom_memory(void)
/* Nothing yet */
}
void nlm_percpu_init(int hwcpuid)
{
if (hwcpuid % 4 == 0)
xlr_percpu_fmn_init();
}
static void __init build_arcs_cmdline(int *argv)
{
int i, remain, len;
......@@ -176,9 +181,19 @@ static void prom_add_memory(void)
}
}
static void nlm_init_node(void)
{
struct nlm_soc_info *nodep;
nodep = nlm_current_node();
nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
spin_lock_init(&nodep->piclock);
}
void __init prom_init(void)
{
int *argv, *envp; /* passed as 32 bit ptrs */
int i, *argv, *envp; /* passed as 32 bit ptrs */
struct psb_info *prom_infop;
/* truncate to 32 bit and sign extend all args */
......@@ -187,15 +202,19 @@ void __init prom_init(void)
prom_infop = (struct psb_info *)(long)(int)fw_arg3;
nlm_prom_info = *prom_infop;
nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
nlm_init_node();
nlm_early_serial_setup();
build_arcs_cmdline(argv);
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
prom_add_memory();
#ifdef CONFIG_SMP
nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map);
for (i = 0; i < 32; i++)
if (nlm_prom_info.online_cpu_map & (1 << i))
cpumask_set_cpu(i, &nlm_cpumask);
nlm_wakeup_secondary_cpus();
register_smp_ops(&nlm_smp_ops);
#endif
xlr_board_info_setup();
xlr_percpu_fmn_init();
}
......@@ -33,6 +33,7 @@
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/threads.h>
#include <asm/asm.h>
......@@ -50,18 +51,34 @@
int __cpuinit xlr_wakeup_secondary_cpus(void)
{
unsigned int i, boot_cpu;
struct nlm_soc_info *nodep;
unsigned int i, j, boot_cpu;
/*
* In case of RMI boot, hit the cores with an NMI to get them
* from the bootloader into Linux code.
*/
nodep = nlm_get_node(0);
boot_cpu = hard_smp_processor_id();
nlm_set_nmi_handler(nlm_rmiboot_preboot);
for (i = 0; i < NR_CPUS; i++) {
if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0)
if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
continue;
nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */
nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
}
/* Fill up the coremask early */
nodep->coremask = 1;
for (i = 1; i < NLM_CORES_PER_NODE; i++) {
for (j = 1000000; j > 0; j--) {
if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE])
break;
udelay(10);
}
if (j != 0)
nodep->coremask |= (1u << i);
else
pr_err("Failed to wakeup core %d\n", i);
}
return 0;
......
......@@ -12,4 +12,5 @@ oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
......@@ -90,6 +90,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_XLR:
lmodel = &op_model_mipsxx_ops;
break;
......
......@@ -31,8 +31,22 @@
#define M_COUNTER_OVERFLOW (1UL << 31)
/* Netlogic XLR specific, count events in all threads in a core */
#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
static int (*save_perf_irq)(void);
/*
* XLR has only one set of counters per core. Designate the
* first hardware thread in the core for setup and init.
* Skip CPUs with non-zero hardware thread id (4 hwt per core)
*/
#ifdef CONFIG_CPU_XLR
#define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c) 0
#endif
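/*
 * Editor's note: with 4 hardware threads per core, the test above keeps only
 * the CPUs whose physical id has the low two bits clear, i.e. thread 0 of
 * every core (physical ids 0, 4, 8, ...); threads 1-3 of each core are
 * skipped so the shared counters are programmed exactly once per core.
 */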
#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;
#define WHAT (M_TC_EN_VPE | \
......@@ -152,6 +166,8 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
if (current_cpu_type() == CPU_XLR)
reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}
}
......@@ -162,6 +178,9 @@ static void mipsxx_cpu_setup(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
if (oprofile_skip_cpu(smp_processor_id()))
return;
switch (counters) {
case 4:
w_c0_perfctrl3(0);
......@@ -183,6 +202,9 @@ static void mipsxx_cpu_start(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
if (oprofile_skip_cpu(smp_processor_id()))
return;
switch (counters) {
case 4:
w_c0_perfctrl3(WHAT | reg.control[3]);
......@@ -200,6 +222,9 @@ static void mipsxx_cpu_stop(void *args)
{
unsigned int counters = op_model_mipsxx_ops.num_counters;
if (oprofile_skip_cpu(smp_processor_id()))
return;
switch (counters) {
case 4:
w_c0_perfctrl3(0);
......@@ -372,6 +397,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/loongson1";
break;
case CPU_XLR:
op_model_mipsxx_ops.cpu_type = "mips/xlr";
break;
default:
printk(KERN_ERR "Profiling unsupported for this CPU\n");
......
......@@ -11,8 +11,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
#include <bcm63xx_reset.h>
#include "pci-bcm63xx.h"
/*
......@@ -119,41 +122,36 @@ static void __init bcm63xx_reset_pcie(void)
{
u32 val;
/* enable clock */
val = bcm_perf_readl(PERF_CKCTL_REG);
val |= CKCTL_6328_PCIE_EN;
bcm_perf_writel(val, PERF_CKCTL_REG);
/* enable SERDES */
val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
/* reset the PCIe core */
val = bcm_perf_readl(PERF_SOFTRESET_6328_REG);
val &= ~SOFTRESET_6328_PCIE_MASK;
val &= ~SOFTRESET_6328_PCIE_CORE_MASK;
val &= ~SOFTRESET_6328_PCIE_HARD_MASK;
val &= ~SOFTRESET_6328_PCIE_EXT_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 1);
mdelay(10);
val |= SOFTRESET_6328_PCIE_MASK;
val |= SOFTRESET_6328_PCIE_CORE_MASK;
val |= SOFTRESET_6328_PCIE_HARD_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 0);
mdelay(10);
val |= SOFTRESET_6328_PCIE_EXT_MASK;
bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
bcm63xx_core_set_reset(BCM63XX_RESET_PCIE_EXT, 0);
mdelay(200);
}
static struct clk *pcie_clk;
static int __init bcm63xx_register_pcie(void)
{
u32 val;
/* enable clock */
pcie_clk = clk_get(NULL, "pcie");
if (IS_ERR_OR_NULL(pcie_clk))
return -ENODEV;
clk_prepare_enable(pcie_clk);
bcm63xx_reset_pcie();
/* configure the PCIe bridge */
......
......@@ -47,6 +47,7 @@
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/xlr/msidef.h>
#include <asm/netlogic/xlr/iomap.h>
......@@ -174,22 +175,9 @@ static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
return p ? bus->self : NULL;
}
static int get_irq_vector(const struct pci_dev *dev)
static int nlm_pci_link_to_irq(int link)
{
struct pci_dev *lnk;
if (!nlm_chip_is_xls())
return PIC_PCIX_IRQ; /* for XLR just one IRQ */
/*
* For XLS PCIe there is an IRQ per link; find out which
* link the device is on to assign the interrupt
*/
lnk = xls_get_pcie_link(dev);
if (lnk == NULL)
return 0;
switch (PCI_SLOT(lnk->devfn)) {
switch (link) {
case 0:
return PIC_PCIE_LINK0_IRQ;
case 1:
......@@ -205,10 +193,26 @@ static int get_irq_vector(const struct pci_dev *dev)
else
return PIC_PCIE_LINK3_IRQ;
}
WARN(1, "Unexpected devfn %d\n", lnk->devfn);
WARN(1, "Unexpected link %d\n", link);
return 0;
}
static int get_irq_vector(const struct pci_dev *dev)
{
struct pci_dev *lnk;
int link;
if (!nlm_chip_is_xls())
return PIC_PCIX_IRQ; /* for XLR just one IRQ */
lnk = xls_get_pcie_link(dev);
if (lnk == NULL)
return 0;
link = PCI_SLOT(lnk->devfn);
return nlm_pci_link_to_irq(link);
}
#ifdef CONFIG_PCI_MSI
void destroy_irq(unsigned int irq)
{
......@@ -332,6 +336,9 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
static int __init pcibios_init(void)
{
void (*extra_ack)(struct irq_data *);
int link, irq;
/* PSB assigns PCI resources */
pci_set_flags(PCI_PROBE_ONLY);
pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
......@@ -350,27 +357,19 @@ static int __init pcibios_init(void)
* For PCI interrupts, we need to ack the PCI controller too; overload
* the irq handler data to do this
*/
if (nlm_chip_is_xls()) {
if (nlm_chip_is_xls_b()) {
irq_set_handler_data(PIC_PCIE_LINK0_IRQ,
xls_pcie_ack_b);
irq_set_handler_data(PIC_PCIE_LINK1_IRQ,
xls_pcie_ack_b);
irq_set_handler_data(PIC_PCIE_XLSB0_LINK2_IRQ,
xls_pcie_ack_b);
irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ,
xls_pcie_ack_b);
} else {
irq_set_handler_data(PIC_PCIE_LINK0_IRQ, xls_pcie_ack);
irq_set_handler_data(PIC_PCIE_LINK1_IRQ, xls_pcie_ack);
irq_set_handler_data(PIC_PCIE_LINK2_IRQ, xls_pcie_ack);
irq_set_handler_data(PIC_PCIE_LINK3_IRQ, xls_pcie_ack);
}
} else {
if (!nlm_chip_is_xls()) {
/* XLR PCI controller ACK */
irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
nlm_set_pic_extra_ack(0, PIC_PCIX_IRQ, xlr_pci_ack);
} else {
if (nlm_chip_is_xls_b())
extra_ack = xls_pcie_ack_b;
else
extra_ack = xls_pcie_ack;
for (link = 0; link < 4; link++) {
irq = nlm_pci_link_to_irq(link);
nlm_set_pic_extra_ack(0, irq, extra_ack);
}
}
return 0;
}
......
......@@ -65,6 +65,15 @@ config BCMA_DRIVER_GMAC_CMN
If unsure, say N
config BCMA_DRIVER_GPIO
bool "BCMA GPIO driver"
depends on BCMA
select GPIOLIB
help
Driver to provide access to the GPIO pins of the bcma bus.
If unsure, say N
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
......
......@@ -6,6 +6,7 @@ bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
bcma-$(CONFIG_BCMA_DRIVER_GPIO) += driver_gpio.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
......
......@@ -89,4 +89,14 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
#ifdef CONFIG_BCMA_DRIVER_GPIO
/* driver_gpio.c */
int bcma_gpio_init(struct bcma_drv_cc *cc);
#else
static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
{
return -ENOTSUPP;
}
#endif /* CONFIG_BCMA_DRIVER_GPIO */
#endif
......@@ -30,6 +30,8 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
if (cc->setup_done)
return;
spin_lock_init(&cc->gpio_lock);
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
......@@ -84,28 +86,97 @@ u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
unsigned long flags;
u32 res;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
unsigned long flags;
u32 res;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
/*
* If the bit is set to 0, chipcommon controls this GPIO;
* if the bit is set to 1, it is used by some other part of the chip and not by our code.
*/
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
unsigned long flags;
u32 res;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
unsigned long flags;
u32 res;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
unsigned long flags;
u32 res;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
unsigned long flags;
u32 res;
if (cc->core->id.rev < 20)
return 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
unsigned long flags;
u32 res;
if (cc->core->id.rev < 20)
return 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
#ifdef CONFIG_BCMA_DRIVER_MIPS
......
/*
* Broadcom specific AMBA
* GPIO driver
*
* Copyright 2011, Broadcom Corporation
* Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include "bcma_private.h"
static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip)
{
return container_of(chip, struct bcma_drv_cc, gpio);
}
static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
return !!bcma_chipco_gpio_in(cc, 1 << gpio);
}
static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
}
static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
return 0;
}
static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
return 0;
}
static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
bcma_chipco_gpio_control(cc, 1 << gpio, 0);
/* clear pulldown */
bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
/* Set pullup */
bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);
return 0;
}
static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
{
struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
/* clear pullup */
bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
}
int bcma_gpio_init(struct bcma_drv_cc *cc)
{
struct gpio_chip *chip = &cc->gpio;
chip->label = "bcma_gpio";
chip->owner = THIS_MODULE;
chip->request = bcma_gpio_request;
chip->free = bcma_gpio_free;
chip->get = bcma_gpio_get_value;
chip->set = bcma_gpio_set_value;
chip->direction_input = bcma_gpio_direction_input;
chip->direction_output = bcma_gpio_direction_output;
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO numbers should be
* deterministic so they are easier to address. The other buses could get
* a random base number. */
if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
chip->base = 0;
else
chip->base = -1;
return gpiochip_add(chip);
}
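/*
 * Editor's note: a hypothetical sketch of a consumer driving one of the pins
 * registered above through the legacy integer gpiolib API.  The pin number
 * and function name are illustrative; on a SoC bus the chip base is 0, so
 * bcma GPIO n is plain GPIO number n.
 */
static int example_toggle_pin(void)
{
	int err;

	err = gpio_request(2, "bcma-example");	/* routed to bcma_gpio_request() */
	if (err)
		return err;

	gpio_direction_output(2, 1);		/* drive the pin high */
	gpio_set_value(2, 0);			/* then low again */
	gpio_free(2);				/* released via bcma_gpio_free() */
	return 0;
}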
......@@ -152,6 +152,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
bcma_err(bus, "Error registering NAND flash\n");
}
#endif
err = bcma_gpio_init(&bus->drv_cc);
if (err == -ENOTSUPP)
bcma_debug(bus, "GPIO driver not activated\n");
else if (err)
bcma_err(bus, "Error registering GPIO driver: %i\n", err);
return 0;
}
......
......@@ -160,4 +160,13 @@ config SSB_DRIVER_GIGE
If unsure, say N
config SSB_DRIVER_GPIO
bool "SSB GPIO driver"
depends on SSB
select GPIOLIB
help
Driver to provide access to the GPIO pins on the bus.
If unsure, say N
endmenu
......@@ -15,6 +15,7 @@ ssb-$(CONFIG_SSB_DRIVER_MIPS) += driver_mipscore.o
ssb-$(CONFIG_SSB_DRIVER_EXTIF) += driver_extif.o
ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o
ssb-$(CONFIG_SSB_DRIVER_GIGE) += driver_gige.o
ssb-$(CONFIG_SSB_DRIVER_GPIO) += driver_gpio.o
# b43 pci-ssb-bridge driver
# Not strictly a part of SSB, but kept here for convenience
......
......@@ -284,6 +284,9 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
{
if (!cc->dev)
return; /* We don't have a ChipCommon */
spin_lock_init(&cc->gpio_lock);
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
......@@ -418,28 +421,93 @@ u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
EXPORT_SYMBOL(ssb_chipco_gpio_control);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
unsigned long flags;
u32 res = 0;
if (cc->dev->id.revision < 20)
return 0xffffffff;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLUP, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value)
{
unsigned long flags;
u32 res = 0;
if (cc->dev->id.revision < 20)
return 0xffffffff;
spin_lock_irqsave(&cc->gpio_lock, flags);
res = chipco_write32_masked(cc, SSB_CHIPCO_GPIOPULLDOWN, mask, value);
spin_unlock_irqrestore(&cc->gpio_lock, flags);
return res;
}
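All of these accessors are read-modify-write sequences on a shared register, which is why the new gpio_lock now wraps them; the same treatment is applied to the extif accessors further down. A sketch of what chipco_write32_masked() presumably does internally (an assumption for illustration; the helper itself is defined elsewhere in driver_chipcommon.c and is not touched by this commit):

static u32 chipco_write32_masked_sketch(struct ssb_chipcommon *cc, u16 offset,
					u32 mask, u32 value)
{
	value &= mask;
	value |= chipco_read32(cc, offset) & ~mask;	/* read the bits outside the mask */
	chipco_write32(cc, offset, value);		/* write back the merged value */
	return value;
}

Without the spinlock, two callers could interleave between the read and the write and silently drop each other's bit updates.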
#ifdef CONFIG_SSB_SERIAL
......
......@@ -118,6 +118,13 @@ void ssb_extif_watchdog_timer_set(struct ssb_extif *extif,
extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks);
}
void ssb_extif_init(struct ssb_extif *extif)
{
if (!extif->dev)
return; /* We don't have an Extif core */
spin_lock_init(&extif->gpio_lock);
}
u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
{
return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask;
......@@ -125,22 +132,50 @@ u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value)
{
return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&extif->gpio_lock, flags);
res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
mask, value);
spin_unlock_irqrestore(&extif->gpio_lock, flags);
return res;
}
u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value)
{
return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&extif->gpio_lock, flags);
res = extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
mask, value);
spin_unlock_irqrestore(&extif->gpio_lock, flags);
return res;
}
u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value)
{
return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&extif->gpio_lock, flags);
res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
spin_unlock_irqrestore(&extif->gpio_lock, flags);
return res;
}
u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value)
{
return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
unsigned long flags;
u32 res = 0;
spin_lock_irqsave(&extif->gpio_lock, flags);
res = extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
spin_unlock_irqrestore(&extif->gpio_lock, flags);
return res;
}
/*
* Sonics Silicon Backplane
* GPIO driver
*
* Copyright 2011, Broadcom Corporation
* Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/gpio.h>
#include <linux/export.h>
#include <linux/ssb/ssb.h>
#include "ssb_private.h"
static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
{
return container_of(chip, struct ssb_bus, gpio);
}
static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
}
static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
}
static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0);
return 0;
}
static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio);
ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
return 0;
}
static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0);
/* clear pulldown */
ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0);
/* Set pullup */
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio);
return 0;
}
static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
/* clear pullup */
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
}
static int ssb_gpio_chipco_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
chip->label = "ssb_chipco_gpio";
chip->owner = THIS_MODULE;
chip->request = ssb_gpio_chipco_request;
chip->free = ssb_gpio_chipco_free;
chip->get = ssb_gpio_chipco_get_value;
chip->set = ssb_gpio_chipco_set_value;
chip->direction_input = ssb_gpio_chipco_direction_input;
chip->direction_output = ssb_gpio_chipco_direction_output;
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
* a random base number. */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
chip->base = -1;
return gpiochip_add(chip);
}
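With the chip registered, a gpiolib consumer automatically goes through the .request and .free hooks above: ssb_gpio_chipco_request() enables the pull-up (on ChipCommon rev >= 20) while the pin is claimed, and ssb_gpio_chipco_free() clears it on release. A hedged sketch of that lifecycle for a hypothetical input pin (pin number and label are illustrative, not part of this commit):

#include <linux/gpio.h>

static int example_ssb_read_button(unsigned int pin)
{
	int err, level;

	err = gpio_request(pin, "example-button");	/* invokes ssb_gpio_chipco_request() */
	if (err)
		return err;
	err = gpio_direction_input(pin);		/* invokes ssb_gpio_chipco_direction_input() */
	if (err)
		goto out;
	level = gpio_get_value(pin);			/* invokes ssb_gpio_chipco_get_value() */
	err = level;
out:
	gpio_free(pin);					/* invokes ssb_gpio_chipco_free() */
	return err;
}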
#ifdef CONFIG_SSB_DRIVER_EXTIF
static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
}
static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
int value)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
}
static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
unsigned gpio)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0);
return 0;
}
static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct ssb_bus *bus = ssb_gpio_get_bus(chip);
ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio);
ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
return 0;
}
static int ssb_gpio_extif_init(struct ssb_bus *bus)
{
struct gpio_chip *chip = &bus->gpio;
chip->label = "ssb_extif_gpio";
chip->owner = THIS_MODULE;
chip->get = ssb_gpio_extif_get_value;
chip->set = ssb_gpio_extif_set_value;
chip->direction_input = ssb_gpio_extif_direction_input;
chip->direction_output = ssb_gpio_extif_direction_output;
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
* a random base number. */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
chip->base = -1;
return gpiochip_add(chip);
}
#else
static int ssb_gpio_extif_init(struct ssb_bus *bus)
{
return -ENOTSUPP;
}
#endif
int ssb_gpio_init(struct ssb_bus *bus)
{
if (ssb_chipco_available(&bus->chipco))
return ssb_gpio_chipco_init(bus);
else if (ssb_extif_available(&bus->extif))
return ssb_gpio_extif_init(bus);
else
SSB_WARN_ON(1);
return -1;
}
......@@ -796,7 +796,14 @@ static int __devinit ssb_bus_register(struct ssb_bus *bus,
if (err)
goto err_pcmcia_exit;
ssb_chipcommon_init(&bus->chipco);
ssb_extif_init(&bus->extif);
ssb_mipscore_init(&bus->mipscore);
err = ssb_gpio_init(bus);
if (err == -ENOTSUPP)
ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n");
else if (err)
ssb_dprintk(KERN_ERR PFX
"Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
......
......@@ -211,4 +211,21 @@ static inline void b43_pci_ssb_bridge_exit(void)
extern u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc);
extern u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc);
#ifdef CONFIG_SSB_DRIVER_EXTIF
extern void ssb_extif_init(struct ssb_extif *extif);
#else
static inline void ssb_extif_init(struct ssb_extif *extif)
{
}
#endif
#ifdef CONFIG_SSB_DRIVER_GPIO
extern int ssb_gpio_init(struct ssb_bus *bus);
#else /* CONFIG_SSB_DRIVER_GPIO */
static inline int ssb_gpio_init(struct ssb_bus *bus)
{
return -ENOTSUPP;
}
#endif /* CONFIG_SSB_DRIVER_GPIO */
#endif /* LINUX_SSB_PRIVATE_H_ */
#ifndef LINUX_BCMA_DRIVER_CC_H_
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/gpio.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
#define BCMA_CC_ID_ID 0x0000FFFF
......@@ -567,6 +569,12 @@ struct bcma_drv_cc {
int nr_serial_ports;
struct bcma_serial_port serial_ports[4];
#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* Lock for GPIO register access. */
spinlock_t gpio_lock;
#ifdef CONFIG_BCMA_DRIVER_GPIO
struct gpio_chip gpio;
#endif
};
/* Register access */
......@@ -603,6 +611,8 @@ u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
/* PMU support */
extern void bcma_pmu_init(struct bcma_drv_cc *cc);
......
......@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
......@@ -433,6 +434,9 @@ struct ssb_bus {
/* Lock for GPIO register access. */
spinlock_t gpio_lock;
#endif /* EMBEDDED */
#ifdef CONFIG_SSB_DRIVER_GPIO
struct gpio_chip gpio;
#endif /* DRIVER_GPIO */
/* Internal-only stuff follows. Do not touch. */
struct list_head list;
......
......@@ -590,6 +590,7 @@ struct ssb_chipcommon {
u32 status;
/* Fast Powerup Delay constant */
u16 fast_pwrup_delay;
spinlock_t gpio_lock;
struct ssb_chipcommon_pmu pmu;
};
......@@ -644,6 +645,8 @@ u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value);
u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value);
#ifdef CONFIG_SSB_SERIAL
extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
......
......@@ -158,6 +158,7 @@
struct ssb_extif {
struct ssb_device *dev;
spinlock_t gpio_lock;
};
static inline bool ssb_extif_available(struct ssb_extif *extif)
......