Commit 72e58063 authored by Linus Torvalds

Merge branch 'davinci-for-linus' of...

Merge branch 'davinci-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-davinci

* 'davinci-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-davinci: (50 commits)
  davinci: fix remaining board support after io_pgoffst removal
  davinci: mityomapl138: make file local data static
  arm/davinci: remove duplicated include
  davinci: Initial support for Omapl138-Hawkboard
  davinci: MityDSP-L138/MityARM-1808 read MAC address from I2C Prom
  davinci: add tnetv107x touchscreen platform device
  input: add driver for tnetv107x touchscreen controller
  davinci: add keypad config for tnetv107x evm board
  davinci: add tnetv107x keypad platform device
  input: add driver for tnetv107x on-chip keypad controller
  net: davinci_emac: cleanup unused cpdma code
  net: davinci_emac: switch to new cpdma layer
  net: davinci_emac: separate out cpdma code
  net: davinci_emac: cleanup unused mdio emac code
  omap: cleanup unused davinci mdio arch code
  davinci: cleanup mdio arch code and switch to phy_id
  net: davinci_emac: switch to new mdio
  omap: add mdio platform devices
  davinci: add mdio platform devices
  net: davinci_emac: separate out davinci mdio
  ...

Fix up trivial conflict in drivers/input/keyboard/Kconfig (two entries
added next to each other - one from the davinci merge, one from the
input merge)
parents 57c155d5 489e176c
......@@ -5388,8 +5388,8 @@ F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
TI DAVINCI MACHINE SUPPORT
P: Kevin Hilman
M: davinci-linux-open-source@linux.davincidsp.com
M: Kevin Hilman <khilman@deeprootsystems.com>
L: davinci-linux-open-source@linux.davincidsp.com (subscribers-only)
Q: http://patchwork.kernel.org/project/linux-davinci/list/
S: Supported
F: arch/arm/mach-davinci
......
......@@ -17,6 +17,8 @@ CONFIG_MODVERSIONS=y
CONFIG_ARCH_DAVINCI=y
CONFIG_ARCH_DAVINCI_DA830=y
CONFIG_ARCH_DAVINCI_DA850=y
CONFIG_MACH_MITYOMAPL138=y
CONFIG_MACH_OMAPL138_HAWKBOARD=y
CONFIG_DAVINCI_RESET_CLOCKS=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
......@@ -79,6 +81,7 @@ CONFIG_I2C_DAVINCI=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_DUMMY=y
CONFIG_REGULATOR_TPS6507X=y
CONFIG_FB=y
CONFIG_FB_DA8XX=y
......
......@@ -20,23 +20,23 @@ config ARCH_DAVINCI_DM644x
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM355
bool "DaVinci 355 based system"
bool "DaVinci 355 based system"
select AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DM646x
bool "DaVinci 646x based system"
bool "DaVinci 646x based system"
select AINTC
select ARCH_DAVINCI_DMx
config ARCH_DAVINCI_DA830
bool "DA830/OMAP-L137 based system"
bool "DA830/OMAP-L137/AM17x based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138 based system"
bool "DA850/OMAP-L138/AM18x based system"
select CP_INTC
select ARCH_DAVINCI_DA8XX
select ARCH_HAS_CPUFREQ
......@@ -115,21 +115,21 @@ config MACH_DAVINCI_DM365_EVM
for development is a DM365 EVM
config MACH_DAVINCI_DA830_EVM
bool "TI DA830/OMAP-L137 Reference Platform"
bool "TI DA830/OMAP-L137/AM17x Reference Platform"
default ARCH_DAVINCI_DA830
depends on ARCH_DAVINCI_DA830
select GPIO_PCF857X
help
Say Y here to select the TI DA830/OMAP-L137 Evaluation Module.
Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module.
choice
prompt "Select DA830/OMAP-L137 UI board peripheral"
prompt "Select DA830/OMAP-L137/AM17x UI board peripheral"
depends on MACH_DAVINCI_DA830_EVM
help
The presence of UI card on the DA830/OMAP-L137 EVM is detected
automatically based on successful probe of the I2C based GPIO
expander on that board. This option selected in this menu has
an effect only in case of a successful UI card detection.
The presence of UI card on the DA830/OMAP-L137/AM17x EVM is
detected automatically based on successful probe of the I2C
based GPIO expander on that board. This option selected in this
menu has an effect only in case of a successful UI card detection.
config DA830_UI_LCD
bool "LCD"
......@@ -140,23 +140,23 @@ config DA830_UI_LCD
config DA830_UI_NAND
bool "NAND flash"
help
Say Y here to use the NAND flash. Do not forget to setup
Say Y here to use the NAND flash. Do not forget to setup
the switch correctly.
endchoice
config MACH_DAVINCI_DA850_EVM
bool "TI DA850/OMAP-L138 Reference Platform"
bool "TI DA850/OMAP-L138/AM18x Reference Platform"
default ARCH_DAVINCI_DA850
depends on ARCH_DAVINCI_DA850
select GPIO_PCA953X
help
Say Y here to select the TI DA850/OMAP-L138 Evaluation Module.
Say Y here to select the TI DA850/OMAP-L138/AM18x Evaluation Module.
choice
prompt "Select peripherals connected to expander on UI board"
depends on MACH_DAVINCI_DA850_EVM
help
The presence of User Interface (UI) card on the DA850/OMAP-L138
The presence of User Interface (UI) card on the DA850/OMAP-L138/AM18x
EVM is detected automatically based on successful probe of the I2C
based GPIO expander on that card. This option selected in this
menu has an effect only in case of a successful UI card detection.
......@@ -165,13 +165,13 @@ config DA850_UI_NONE
bool "No peripheral is enabled"
help
Say Y if you do not want to enable any of the peripherals connected
to TCA6416 expander on DA850/OMAP-L138 EVM UI card
to TCA6416 expander on DA850/OMAP-L138/AM18x EVM UI card
config DA850_UI_RMII
bool "RMII Ethernet PHY"
help
Say Y if you want to use the RMII PHY on the DA850/OMAP-L138 EVM.
This PHY is found on the UI daughter card that is supplied with
Say Y if you want to use the RMII PHY on the DA850/OMAP-L138/AM18x
EVM. This PHY is found on the UI daughter card that is supplied with
the EVM.
NOTE: Please take care while choosing this option, MII PHY will
not be functional if RMII mode is selected.
......@@ -185,6 +185,22 @@ config MACH_TNETV107X
help
Say Y here to select the TI TNETV107X Evaluation Module.
config MACH_MITYOMAPL138
bool "Critical Link MityDSP-L138/MityARM-1808 SoM"
depends on ARCH_DAVINCI_DA850
help
Say Y here to select the Critical Link MityDSP-L138/MityARM-1808
System on Module. Information on this SoM may be found at
http://www.mitydsp.com
config MACH_OMAPL138_HAWKBOARD
bool "TI AM1808 / OMAPL-138 Hawkboard platform"
depends on ARCH_DAVINCI_DA850
help
	  Say Y here to select the TI AM1808 / OMAP-L138 Hawkboard platform.
	  Information on this board may be found at
	  http://www.hawkboard.org/
config DAVINCI_MUX
bool "DAVINCI multiplexing support"
depends on ARCH_DAVINCI
......@@ -195,20 +211,20 @@ config DAVINCI_MUX
say Y.
config DAVINCI_MUX_DEBUG
bool "Multiplexing debug output"
depends on DAVINCI_MUX
help
Makes the multiplexing functions print out a lot of debug info.
This is useful if you want to find out the correct values of the
multiplexing registers.
bool "Multiplexing debug output"
depends on DAVINCI_MUX
help
Makes the multiplexing functions print out a lot of debug info.
This is useful if you want to find out the correct values of the
multiplexing registers.
config DAVINCI_MUX_WARNINGS
bool "Warn about pins the bootloader didn't set up"
depends on DAVINCI_MUX
help
Choose Y here to warn whenever driver initialization logic needs
to change the pin multiplexing setup. When there are no warnings
printed, it's safe to deselect DAVINCI_MUX for your product.
bool "Warn about pins the bootloader didn't set up"
depends on DAVINCI_MUX
help
Choose Y here to warn whenever driver initialization logic needs
to change the pin multiplexing setup. When there are no warnings
printed, it's safe to deselect DAVINCI_MUX for your product.
config DAVINCI_RESET_CLOCKS
bool "Reset unused clocks during boot"
......
......@@ -5,7 +5,7 @@
# Common objects
obj-y := time.o clock.o serial.o io.o psc.o \
gpio.o dma.o usb.o common.o sram.o
gpio.o dma.o usb.o common.o sram.o aemif.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o
......@@ -33,6 +33,8 @@ obj-$(CONFIG_MACH_DAVINCI_DM365_EVM) += board-dm365-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA830_EVM) += board-da830-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
obj-$(CONFIG_MACH_TNETV107X) += board-tnetv107x-evm.o
obj-$(CONFIG_MACH_MITYOMAPL138) += board-mityomapl138.o
obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o
# Power Management
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
......
/*
* AEMIF support for DaVinci SoCs
*
* Copyright (C) 2010 Texas Instruments Incorporated. http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/time.h>
#include <mach/aemif.h>
/* Timing value configuration */
#define TA(x) ((x) << 2)
#define RHOLD(x) ((x) << 4)
#define RSTROBE(x) ((x) << 7)
#define RSETUP(x) ((x) << 13)
#define WHOLD(x) ((x) << 17)
#define WSTROBE(x) ((x) << 20)
#define WSETUP(x) ((x) << 26)
#define TA_MAX 0x3
#define RHOLD_MAX 0x7
#define RSTROBE_MAX 0x3f
#define RSETUP_MAX 0xf
#define WHOLD_MAX 0x7
#define WSTROBE_MAX 0x3f
#define WSETUP_MAX 0xf
#define TIMING_MASK (TA(TA_MAX) | \
RHOLD(RHOLD_MAX) | \
RSTROBE(RSTROBE_MAX) | \
RSETUP(RSETUP_MAX) | \
WHOLD(WHOLD_MAX) | \
WSTROBE(WSTROBE_MAX) | \
WSETUP(WSETUP_MAX))
/*
* aemif_calc_rate - calculate timing data.
* @wanted: The cycle time needed in nanoseconds.
* @clk: The input clock rate in kHz.
* @max: The maximum divider value that can be programmed.
*
* On success, returns the calculated timing value minus 1 for easy
* programming into AEMIF timing registers, else negative errno.
*/
static int aemif_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
result = DIV_ROUND_UP((wanted * clk), NSEC_PER_MSEC) - 1;
pr_debug("%s: result %d from %ld, %d\n", __func__, result, clk, wanted);
/* It is generally OK to have a more relaxed timing than requested... */
if (result < 0)
result = 0;
/* ... But configuring tighter timings is not an option. */
else if (result > max)
result = -EINVAL;
return result;
}
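/*
 * Editorial worked example, not part of the patch: with a 100 MHz AEMIF
 * clock (clk = 100000 kHz) and a requested 24 ns write-setup time,
 *
 *	DIV_ROUND_UP(24 * 100000, NSEC_PER_MSEC) - 1 = 3 - 1 = 2
 *
 * so the register field is programmed with 2, i.e. 3 AEMIF clock cycles
 * (30 ns), the shortest setting that still satisfies the 24 ns request.
 * Requests shorter than one clock cycle clamp to the one-cycle minimum
 * (value 0); requests needing more cycles than "max" allows fail with
 * -EINVAL.
 */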
/**
* davinci_aemif_setup_timing - setup timing values for a given AEMIF interface
 * @t: timing values to be programmed
* @base: The virtual base address of the AEMIF interface
* @cs: chip-select to program the timing values for
*
* This function programs the given timing values (in real clock) into the
* AEMIF registers taking the AEMIF clock into account.
*
* This function does not use any locking while programming the AEMIF
* because it is expected that there is only one user of a given
* chip-select.
*
* Returns 0 on success, else negative errno.
*/
int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
void __iomem *base, unsigned cs)
{
unsigned set, val;
	int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup; /* signed: aemif_calc_rate() can return -EINVAL */
unsigned offset = A1CR_OFFSET + cs * 4;
struct clk *aemif_clk;
unsigned long clkrate;
if (!t)
return 0; /* Nothing to do */
aemif_clk = clk_get(NULL, "aemif");
if (IS_ERR(aemif_clk))
return PTR_ERR(aemif_clk);
clkrate = clk_get_rate(aemif_clk);
clkrate /= 1000; /* turn clock into kHz for ease of use */
ta = aemif_calc_rate(t->ta, clkrate, TA_MAX);
rhold = aemif_calc_rate(t->rhold, clkrate, RHOLD_MAX);
rstrobe = aemif_calc_rate(t->rstrobe, clkrate, RSTROBE_MAX);
rsetup = aemif_calc_rate(t->rsetup, clkrate, RSETUP_MAX);
whold = aemif_calc_rate(t->whold, clkrate, WHOLD_MAX);
wstrobe = aemif_calc_rate(t->wstrobe, clkrate, WSTROBE_MAX);
wsetup = aemif_calc_rate(t->wsetup, clkrate, WSETUP_MAX);
if (ta < 0 || rhold < 0 || rstrobe < 0 || rsetup < 0 ||
whold < 0 || wstrobe < 0 || wsetup < 0) {
pr_err("%s: cannot get suitable timings\n", __func__);
return -EINVAL;
}
set = TA(ta) | RHOLD(rhold) | RSTROBE(rstrobe) | RSETUP(rsetup) |
WHOLD(whold) | WSTROBE(wstrobe) | WSETUP(wsetup);
val = __raw_readl(base + offset);
val &= ~TIMING_MASK;
val |= set;
__raw_writel(val, base + offset);
return 0;
}
EXPORT_SYMBOL(davinci_aemif_setup_timing);
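/*
 * Editorial example, not part of the patch: a minimal, hypothetical direct
 * caller of davinci_aemif_setup_timing().  In this series board files do not
 * call it themselves; they point davinci_nand_pdata.timing at a
 * davinci_aemif_timing table (see the EVM files below) and the NAND driver
 * applies it.  The AEMIF base and chip-select index used here are purely
 * illustrative.
 */
static int __maybe_unused example_aemif_timing_setup(void __iomem *aemif_base)
{
	struct davinci_aemif_timing t = {
		.wsetup		= 24,	/* all values in nanoseconds */
		.wstrobe	= 21,
		.whold		= 14,
		.rsetup		= 19,
		.rstrobe	= 50,
		.rhold		= 0,
		.ta		= 20,
	};

	/* program the timing register for a hypothetical chip-select index 0 */
	return davinci_aemif_setup_timing(&t, aemif_base, 0);
}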
......@@ -29,10 +29,9 @@
#include <mach/nand.h>
#include <mach/da8xx.h>
#include <mach/usb.h>
#include <mach/aemif.h>
#define DA830_EVM_PHY_MASK 0x0
#define DA830_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
#define DA830_EVM_PHY_ID ""
/*
* USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4].
*/
......@@ -360,6 +359,16 @@ static struct nand_bbt_descr da830_evm_nand_bbt_mirror_descr = {
.pattern = da830_evm_nand_mirror_pattern
};
static struct davinci_aemif_timing da830_evm_nandflash_timing = {
.wsetup = 24,
.wstrobe = 21,
.whold = 14,
.rsetup = 19,
.rstrobe = 50,
.rhold = 0,
.ta = 20,
};
static struct davinci_nand_pdata da830_evm_nand_pdata = {
.parts = da830_evm_nand_partitions,
.nr_parts = ARRAY_SIZE(da830_evm_nand_partitions),
......@@ -368,6 +377,7 @@ static struct davinci_nand_pdata da830_evm_nand_pdata = {
.options = NAND_USE_FLASH_BBT,
.bbt_td = &da830_evm_nand_bbt_main_descr,
.bbt_md = &da830_evm_nand_bbt_mirror_descr,
.timing = &da830_evm_nandflash_timing,
};
static struct resource da830_evm_nand_resources[] = {
......@@ -546,9 +556,8 @@ static __init void da830_evm_init(void)
da830_evm_usb_init();
soc_info->emac_pdata->phy_mask = DA830_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DA830_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->rmii_en = 1;
soc_info->emac_pdata->phy_id = DA830_EVM_PHY_ID;
ret = davinci_cfg_reg_list(da830_cpgmac_pins);
if (ret)
......@@ -586,6 +595,9 @@ static __init void da830_evm_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init da830_evm_console_init(void)
{
if (!machine_is_davinci_da830_evm())
return 0;
return add_preferred_console("ttyS", 2, "115200");
}
console_initcall(da830_evm_console_init);
......@@ -596,7 +608,7 @@ static void __init da830_evm_map_io(void)
da830_init();
}
MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137 EVM")
MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da830_evm_map_io,
.init_irq = cp_intc_init,
......
......@@ -26,7 +26,6 @@
#include <linux/mtd/physmap.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps6507x.h>
#include <linux/mfd/tps6507x.h>
#include <linux/input/tps6507x-ts.h>
#include <asm/mach-types.h>
......@@ -36,10 +35,9 @@
#include <mach/da8xx.h>
#include <mach/nand.h>
#include <mach/mux.h>
#include <mach/aemif.h>
#define DA850_EVM_PHY_MASK 0x1
#define DA850_EVM_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
#define DA850_EVM_PHY_ID "0:00"
#define DA850_LCD_PWR_PIN GPIO_TO_PIN(2, 8)
#define DA850_LCD_BL_PIN GPIO_TO_PIN(2, 15)
......@@ -110,7 +108,7 @@ static struct platform_device da850_pm_device = {
* to boot, using TI's tools to install the secondary boot loader
* (UBL) and U-Boot.
*/
struct mtd_partition da850_evm_nandflash_partition[] = {
static struct mtd_partition da850_evm_nandflash_partition[] = {
{
.name = "u-boot env",
.offset = 0,
......@@ -143,12 +141,23 @@ struct mtd_partition da850_evm_nandflash_partition[] = {
},
};
static struct davinci_aemif_timing da850_evm_nandflash_timing = {
.wsetup = 24,
.wstrobe = 21,
.whold = 14,
.rsetup = 19,
.rstrobe = 50,
.rhold = 0,
.ta = 20,
};
static struct davinci_nand_pdata da850_evm_nandflash_data = {
.parts = da850_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
.ecc_bits = 4,
.options = NAND_USE_FLASH_BBT,
.timing = &da850_evm_nandflash_timing,
};
static struct resource da850_evm_nandflash_resource[] = {
......@@ -196,6 +205,30 @@ static void __init da850_evm_init_nor(void)
iounmap(aemif_addr);
}
static const short da850_evm_nand_pins[] = {
DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
DA850_EMA_A_1, DA850_EMA_A_2, DA850_NEMA_CS_3, DA850_NEMA_CS_4,
DA850_NEMA_WE, DA850_NEMA_OE,
-1
};
static const short da850_evm_nor_pins[] = {
DA850_EMA_BA_1, DA850_EMA_CLK, DA850_EMA_WAIT_1, DA850_NEMA_CS_2,
DA850_NEMA_WE, DA850_NEMA_OE, DA850_EMA_D_0, DA850_EMA_D_1,
DA850_EMA_D_2, DA850_EMA_D_3, DA850_EMA_D_4, DA850_EMA_D_5,
DA850_EMA_D_6, DA850_EMA_D_7, DA850_EMA_D_8, DA850_EMA_D_9,
DA850_EMA_D_10, DA850_EMA_D_11, DA850_EMA_D_12, DA850_EMA_D_13,
DA850_EMA_D_14, DA850_EMA_D_15, DA850_EMA_A_0, DA850_EMA_A_1,
DA850_EMA_A_2, DA850_EMA_A_3, DA850_EMA_A_4, DA850_EMA_A_5,
DA850_EMA_A_6, DA850_EMA_A_7, DA850_EMA_A_8, DA850_EMA_A_9,
DA850_EMA_A_10, DA850_EMA_A_11, DA850_EMA_A_12, DA850_EMA_A_13,
DA850_EMA_A_14, DA850_EMA_A_15, DA850_EMA_A_16, DA850_EMA_A_17,
DA850_EMA_A_18, DA850_EMA_A_19, DA850_EMA_A_20, DA850_EMA_A_21,
DA850_EMA_A_22, DA850_EMA_A_23,
-1
};
static u32 ui_card_detected;
#if defined(CONFIG_MMC_DAVINCI) || \
......@@ -205,17 +238,17 @@ static u32 ui_card_detected;
#define HAS_MMC 0
#endif
static __init void da850_evm_setup_nor_nand(void)
static inline void da850_evm_setup_nor_nand(void)
{
int ret = 0;
if (ui_card_detected & !HAS_MMC) {
ret = davinci_cfg_reg_list(da850_nand_pins);
ret = davinci_cfg_reg_list(da850_evm_nand_pins);
if (ret)
pr_warning("da850_evm_init: nand mux setup failed: "
"%d\n", ret);
ret = davinci_cfg_reg_list(da850_nor_pins);
ret = davinci_cfg_reg_list(da850_evm_nor_pins);
if (ret)
pr_warning("da850_evm_init: nor mux setup failed: %d\n",
ret);
......@@ -406,7 +439,7 @@ static int da850_lcd_hw_init(void)
/* TPS65070 voltage regulator support */
/* 3.3V */
struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
static struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
{
.supply = "usb0_vdda33",
},
......@@ -416,7 +449,7 @@ struct regulator_consumer_supply tps65070_dcdc1_consumers[] = {
};
/* 3.3V or 1.8V */
struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
static struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
{
.supply = "dvdd3318_a",
},
......@@ -429,14 +462,14 @@ struct regulator_consumer_supply tps65070_dcdc2_consumers[] = {
};
/* 1.2V */
struct regulator_consumer_supply tps65070_dcdc3_consumers[] = {
static struct regulator_consumer_supply tps65070_dcdc3_consumers[] = {
{
.supply = "cvdd",
},
};
/* 1.8V LDO */
struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
static struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
{
.supply = "sata_vddr",
},
......@@ -452,7 +485,7 @@ struct regulator_consumer_supply tps65070_ldo1_consumers[] = {
};
/* 1.2V LDO */
struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
static struct regulator_consumer_supply tps65070_ldo2_consumers[] = {
{
.supply = "sata_vdd",
},
......@@ -475,7 +508,7 @@ static struct tps6507x_reg_platform_data tps6507x_platform_data = {
.defdcdc_default = true,
};
struct regulator_init_data tps65070_regulator_data[] = {
static struct regulator_init_data tps65070_regulator_data[] = {
/* dcdc1 */
{
.constraints = {
......@@ -576,6 +609,23 @@ static const short da850_evm_lcdc_pins[] = {
-1
};
static const short da850_evm_mii_pins[] = {
DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
DA850_MDIO_D,
-1
};
static const short da850_evm_rmii_pins[] = {
DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
DA850_MDIO_D,
-1
};
static int __init da850_evm_config_emac(void)
{
void __iomem *cfg_chip3_base;
......@@ -593,12 +643,12 @@ static int __init da850_evm_config_emac(void)
if (rmii_en) {
val |= BIT(8);
ret = davinci_cfg_reg_list(da850_rmii_pins);
ret = davinci_cfg_reg_list(da850_evm_rmii_pins);
pr_info("EMAC: RMII PHY configured, MII PHY will not be"
" functional\n");
} else {
val &= ~BIT(8);
ret = davinci_cfg_reg_list(da850_cpgmac_pins);
ret = davinci_cfg_reg_list(da850_evm_mii_pins);
pr_info("EMAC: MII PHY configured, RMII PHY will not be"
" functional\n");
}
......@@ -625,8 +675,7 @@ static int __init da850_evm_config_emac(void)
/* Enable/Disable MII MDIO clock */
gpio_direction_output(DA850_MII_MDIO_CLKEN_PIN, rmii_en);
soc_info->emac_pdata->phy_mask = DA850_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DA850_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = DA850_EVM_PHY_ID;
ret = da8xx_register_emac();
if (ret)
......@@ -787,7 +836,7 @@ static __init void da850_evm_init(void)
if (ret)
pr_warning("da850_evm_init: rtc setup failed: %d\n", ret);
ret = da850_register_cpufreq();
ret = da850_register_cpufreq("pll0_sysclk3");
if (ret)
pr_warning("da850_evm_init: cpufreq registration failed: %d\n",
ret);
......@@ -806,6 +855,9 @@ static __init void da850_evm_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init da850_evm_console_init(void)
{
if (!machine_is_davinci_da850_evm())
return 0;
return add_preferred_console("ttyS", 2, "115200");
}
console_initcall(da850_evm_console_init);
......@@ -816,7 +868,7 @@ static void __init da850_evm_map_io(void)
da850_init();
}
MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138 EVM")
MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = da850_evm_map_io,
.init_irq = cp_intc_init,
......
......@@ -54,9 +54,7 @@ static inline int have_tvp7002(void)
return 0;
}
#define DM365_EVM_PHY_MASK (0x2)
#define DM365_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
#define DM365_EVM_PHY_ID "0:01"
/*
* A MAX-II CPLD is used for various board control functions.
*/
......@@ -175,7 +173,9 @@ static struct at24_platform_data eeprom_info = {
.context = (void *)0x7f00,
};
static struct snd_platform_data dm365_evm_snd_data;
static struct snd_platform_data dm365_evm_snd_data = {
.asp_chan_q = EVENTQ_3,
};
static struct i2c_board_info i2c_info[] = {
{
......@@ -533,8 +533,7 @@ static void __init evm_init_cpld(void)
/* ... and ENET ... */
dm365evm_emac_configure();
soc_info->emac_pdata->phy_mask = DM365_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DM365_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = DM365_EVM_PHY_ID;
resets &= ~BIT(3);
/* ... and AIC33 */
......
......@@ -37,10 +37,9 @@
#include <mach/nand.h>
#include <mach/mmc.h>
#include <mach/usb.h>
#include <mach/aemif.h>
#define DM644X_EVM_PHY_MASK (0x2)
#define DM644X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
#define DM644X_EVM_PHY_ID "0:01"
#define LXT971_PHY_ID (0x001378e2)
#define LXT971_PHY_MASK (0xfffffff0)
......@@ -137,11 +136,22 @@ static struct mtd_partition davinci_evm_nandflash_partition[] = {
*/
};
static struct davinci_aemif_timing davinci_evm_nandflash_timing = {
.wsetup = 20,
.wstrobe = 40,
.whold = 20,
.rsetup = 10,
.rstrobe = 40,
.rhold = 10,
.ta = 40,
};
static struct davinci_nand_pdata davinci_evm_nandflash_data = {
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT,
.timing = &davinci_evm_nandflash_timing,
};
static struct resource davinci_evm_nandflash_resource[] = {
......@@ -695,9 +705,7 @@ static __init void davinci_evm_init(void)
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_evm_snd_data);
soc_info->emac_pdata->phy_mask = DM644X_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DM644X_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = DM644X_EVM_PHY_ID;
/* Register the fixup for PHY on DaVinci */
phy_register_fixup_for_uid(LXT971_PHY_ID, LXT971_PHY_MASK,
davinci_phy_fixup);
......
......@@ -42,6 +42,7 @@
#include <mach/nand.h>
#include <mach/clock.h>
#include <mach/cdce949.h>
#include <mach/aemif.h>
#include "clock.h"
......@@ -71,6 +72,16 @@ static struct mtd_partition davinci_nand_partitions[] = {
}
};
static struct davinci_aemif_timing dm6467tevm_nandflash_timing = {
.wsetup = 29,
.wstrobe = 24,
.whold = 14,
.rsetup = 19,
.rstrobe = 33,
.rhold = 0,
.ta = 29,
};
static struct davinci_nand_pdata davinci_nand_data = {
.mask_cle = 0x80000,
.mask_ale = 0x40000,
......@@ -718,9 +729,7 @@ static struct davinci_uart_config uart_config __initdata = {
.enabled_uarts = (1 << 0),
};
#define DM646X_EVM_PHY_MASK (0x2)
#define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
#define DM646X_EVM_PHY_ID "0:01"
/*
* The following EDMA channels/slots are not being used by drivers (for
* example: Timer, GPIO, UART events etc) on dm646x, hence they are being
......@@ -763,6 +772,9 @@ static __init void evm_init(void)
dm646x_init_mcasp0(&dm646x_evm_snd_data[0]);
dm646x_init_mcasp1(&dm646x_evm_snd_data[1]);
if (machine_is_davinci_dm6467tevm())
davinci_nand_data.timing = &dm6467tevm_nandflash_timing;
platform_device_register(&davinci_nand_device);
dm646x_init_edma(dm646x_edma_rsv);
......@@ -770,8 +782,7 @@ static __init void evm_init(void)
if (HAS_ATA)
davinci_init_ide();
soc_info->emac_pdata->phy_mask = DM646X_EVM_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = DM646X_EVM_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = DM646X_EVM_PHY_ID;
}
#define DM646X_EVM_REF_FREQ 27000000
......
/*
* Critical Link MityOMAP-L138 SoM
*
* Copyright (C) 2010 Critical Link LLC - http://www.criticallink.com
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/regulator/machine.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/etherdevice.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/common.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
#include <mach/nand.h>
#include <mach/mux.h>
#define MITYOMAPL138_PHY_ID "0:03"
#define FACTORY_CONFIG_MAGIC 0x012C0138
#define FACTORY_CONFIG_VERSION 0x00010001
/* Data Held in On-Board I2C device */
struct factory_config {
u32 magic;
u32 version;
u8 mac[6];
u32 fpga_type;
u32 spare;
u32 serialnumber;
char partnum[32];
};
static struct factory_config factory_config;
static void read_factory_config(struct memory_accessor *a, void *context)
{
int ret;
struct davinci_soc_info *soc_info = &davinci_soc_info;
ret = a->read(a, (char *)&factory_config, 0, sizeof(factory_config));
if (ret != sizeof(struct factory_config)) {
pr_warning("MityOMAPL138: Read Factory Config Failed: %d\n",
ret);
return;
}
if (factory_config.magic != FACTORY_CONFIG_MAGIC) {
pr_warning("MityOMAPL138: Factory Config Magic Wrong (%X)\n",
factory_config.magic);
return;
}
if (factory_config.version != FACTORY_CONFIG_VERSION) {
pr_warning("MityOMAPL138: Factory Config Version Wrong (%X)\n",
factory_config.version);
return;
}
pr_info("MityOMAPL138: Found MAC = %pM\n", factory_config.mac);
pr_info("MityOMAPL138: Part Number = %s\n", factory_config.partnum);
if (is_valid_ether_addr(factory_config.mac))
memcpy(soc_info->emac_pdata->mac_addr,
factory_config.mac, ETH_ALEN);
else
pr_warning("MityOMAPL138: Invalid MAC found "
"in factory config block\n");
}
static struct at24_platform_data mityomapl138_fd_chip = {
.byte_len = 256,
.page_size = 8,
.flags = AT24_FLAG_READONLY | AT24_FLAG_IRUGO,
.setup = read_factory_config,
.context = NULL,
};
static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = {
.bus_freq = 100, /* kHz */
.bus_delay = 0, /* usec */
};
/* TPS65023 voltage regulator support */
/* 1.2V Core */
static struct regulator_consumer_supply tps65023_dcdc1_consumers[] = {
{
.supply = "cvdd",
},
};
/* 1.8V */
static struct regulator_consumer_supply tps65023_dcdc2_consumers[] = {
{
.supply = "usb0_vdda18",
},
{
.supply = "usb1_vdda18",
},
{
.supply = "ddr_dvdd18",
},
{
.supply = "sata_vddr",
},
};
/* 1.2V */
static struct regulator_consumer_supply tps65023_dcdc3_consumers[] = {
{
.supply = "sata_vdd",
},
{
.supply = "usb_cvdd",
},
{
.supply = "pll0_vdda",
},
{
.supply = "pll1_vdda",
},
};
/* 1.8V Aux LDO, not used */
static struct regulator_consumer_supply tps65023_ldo1_consumers[] = {
{
.supply = "1.8v_aux",
},
};
/* FPGA VCC Aux (2.5 or 3.3) LDO */
static struct regulator_consumer_supply tps65023_ldo2_consumers[] = {
{
.supply = "vccaux",
},
};
static struct regulator_init_data tps65023_regulator_data[] = {
/* dcdc1 */
{
.constraints = {
.min_uV = 1150000,
.max_uV = 1350000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc1_consumers),
.consumer_supplies = tps65023_dcdc1_consumers,
},
/* dcdc2 */
{
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc2_consumers),
.consumer_supplies = tps65023_dcdc2_consumers,
},
/* dcdc3 */
{
.constraints = {
.min_uV = 1200000,
.max_uV = 1200000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(tps65023_dcdc3_consumers),
.consumer_supplies = tps65023_dcdc3_consumers,
},
/* ldo1 */
{
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(tps65023_ldo1_consumers),
.consumer_supplies = tps65023_ldo1_consumers,
},
/* ldo2 */
{
.constraints = {
.min_uV = 2500000,
.max_uV = 3300000,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.boot_on = 1,
},
.num_consumer_supplies = ARRAY_SIZE(tps65023_ldo2_consumers),
.consumer_supplies = tps65023_ldo2_consumers,
},
};
static struct i2c_board_info __initdata mityomap_tps65023_info[] = {
{
I2C_BOARD_INFO("tps65023", 0x48),
.platform_data = &tps65023_regulator_data[0],
},
{
I2C_BOARD_INFO("24c02", 0x50),
.platform_data = &mityomapl138_fd_chip,
},
};
static int __init pmic_tps65023_init(void)
{
return i2c_register_board_info(1, mityomap_tps65023_info,
ARRAY_SIZE(mityomap_tps65023_info));
}
/*
* MityDSP-L138 includes a 256 MByte large-page NAND flash
* (128K blocks).
*/
static struct mtd_partition mityomapl138_nandflash_partition[] = {
{
.name = "rootfs",
.offset = 0,
.size = SZ_128M,
.mask_flags = 0, /* MTD_WRITEABLE, */
},
{
.name = "homefs",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
},
};
static struct davinci_nand_pdata mityomapl138_nandflash_data = {
.parts = mityomapl138_nandflash_partition,
.nr_parts = ARRAY_SIZE(mityomapl138_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
.options = NAND_USE_FLASH_BBT | NAND_BUSWIDTH_16,
.ecc_bits = 1, /* 4 bit mode is not supported with 16 bit NAND */
};
static struct resource mityomapl138_nandflash_resource[] = {
{
.start = DA8XX_AEMIF_CS3_BASE,
.end = DA8XX_AEMIF_CS3_BASE + SZ_512K + 2 * SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
{
.start = DA8XX_AEMIF_CTL_BASE,
.end = DA8XX_AEMIF_CTL_BASE + SZ_32K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device mityomapl138_nandflash_device = {
.name = "davinci_nand",
.id = 0,
.dev = {
.platform_data = &mityomapl138_nandflash_data,
},
.num_resources = ARRAY_SIZE(mityomapl138_nandflash_resource),
.resource = mityomapl138_nandflash_resource,
};
static struct platform_device *mityomapl138_devices[] __initdata = {
&mityomapl138_nandflash_device,
};
static void __init mityomapl138_setup_nand(void)
{
platform_add_devices(mityomapl138_devices,
ARRAY_SIZE(mityomapl138_devices));
}
static struct davinci_uart_config mityomapl138_uart_config __initdata = {
.enabled_uarts = 0x7,
};
static const short mityomap_mii_pins[] = {
DA850_MII_TXEN, DA850_MII_TXCLK, DA850_MII_COL, DA850_MII_TXD_3,
DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
DA850_MDIO_D,
-1
};
static const short mityomap_rmii_pins[] = {
DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
DA850_MDIO_D,
-1
};
static void __init mityomapl138_config_emac(void)
{
void __iomem *cfg_chip3_base;
int ret;
u32 val;
struct davinci_soc_info *soc_info = &davinci_soc_info;
soc_info->emac_pdata->rmii_en = 0; /* hardcoded for now */
cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
val = __raw_readl(cfg_chip3_base);
if (soc_info->emac_pdata->rmii_en) {
val |= BIT(8);
ret = davinci_cfg_reg_list(mityomap_rmii_pins);
pr_info("RMII PHY configured\n");
} else {
val &= ~BIT(8);
ret = davinci_cfg_reg_list(mityomap_mii_pins);
pr_info("MII PHY configured\n");
}
if (ret) {
pr_warning("mii/rmii mux setup failed: %d\n", ret);
return;
}
/* configure the CFGCHIP3 register for RMII or MII */
__raw_writel(val, cfg_chip3_base);
soc_info->emac_pdata->phy_id = MITYOMAPL138_PHY_ID;
ret = da8xx_register_emac();
if (ret)
pr_warning("emac registration failed: %d\n", ret);
}
static struct davinci_pm_config da850_pm_pdata = {
.sleepcount = 128,
};
static struct platform_device da850_pm_device = {
.name = "pm-davinci",
.dev = {
.platform_data = &da850_pm_pdata,
},
.id = -1,
};
static void __init mityomapl138_init(void)
{
int ret;
/* for now, no special EDMA channels are reserved */
ret = da850_register_edma(NULL);
if (ret)
pr_warning("edma registration failed: %d\n", ret);
ret = da8xx_register_watchdog();
if (ret)
pr_warning("watchdog registration failed: %d\n", ret);
davinci_serial_init(&mityomapl138_uart_config);
ret = da8xx_register_i2c(0, &mityomap_i2c_0_pdata);
if (ret)
pr_warning("i2c0 registration failed: %d\n", ret);
ret = pmic_tps65023_init();
if (ret)
pr_warning("TPS65023 PMIC init failed: %d\n", ret);
mityomapl138_setup_nand();
mityomapl138_config_emac();
ret = da8xx_register_rtc();
if (ret)
pr_warning("rtc setup failed: %d\n", ret);
ret = da850_register_cpufreq("pll0_sysclk3");
if (ret)
pr_warning("cpufreq registration failed: %d\n", ret);
ret = da8xx_register_cpuidle();
if (ret)
pr_warning("cpuidle registration failed: %d\n", ret);
ret = da850_register_pm(&da850_pm_device);
if (ret)
pr_warning("da850_evm_init: suspend registration failed: %d\n",
ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init mityomapl138_console_init(void)
{
if (!machine_is_mityomapl138())
return 0;
return add_preferred_console("ttyS", 1, "115200");
}
console_initcall(mityomapl138_console_init);
#endif
static void __init mityomapl138_map_io(void)
{
da850_init();
}
MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = mityomapl138_map_io,
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = mityomapl138_init,
MACHINE_END
......@@ -39,9 +39,7 @@
#include <mach/mmc.h>
#include <mach/usb.h>
#define NEUROS_OSD2_PHY_MASK 0x2
#define NEUROS_OSD2_MDIO_FREQUENCY 2200000 /* PHY bus frequency */
#define NEUROS_OSD2_PHY_ID "0:01"
#define LXT971_PHY_ID 0x001378e2
#define LXT971_PHY_MASK 0xfffffff0
......@@ -252,8 +250,7 @@ static __init void davinci_ntosd2_init(void)
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_ntosd2_snd_data);
soc_info->emac_pdata->phy_mask = NEUROS_OSD2_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = NEUROS_OSD2_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = NEUROS_OSD2_PHY_ID;
davinci_setup_usb(1000, 8);
/*
......
/*
* Hawkboard.org based on TI's OMAP-L138 Platform
*
* Initial code: Syed Mohammed Khasim
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/cp_intc.h>
#include <mach/da8xx.h>
static struct davinci_uart_config omapl138_hawk_uart_config __initdata = {
.enabled_uarts = 0x7,
};
static __init void omapl138_hawk_init(void)
{
int ret;
davinci_serial_init(&omapl138_hawk_uart_config);
ret = da8xx_register_watchdog();
if (ret)
pr_warning("omapl138_hawk_init: "
"watchdog registration failed: %d\n",
ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init omapl138_hawk_console_init(void)
{
if (!machine_is_omapl138_hawkboard())
return 0;
return add_preferred_console("ttyS", 2, "115200");
}
console_initcall(omapl138_hawk_console_init);
#endif
static void __init omapl138_hawk_map_io(void)
{
da850_init();
}
MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.boot_params = (DA8XX_DDR_BASE + 0x100),
.map_io = omapl138_hawk_map_io,
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = omapl138_hawk_init,
MACHINE_END
......@@ -42,9 +42,7 @@
#include <mach/mux.h>
#include <mach/usb.h>
#define SFFSDR_PHY_MASK (0x2)
#define SFFSDR_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
#define SFFSDR_PHY_ID "0:01"
static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
/* U-Boot Environment: Block 0
* UBL: Block 1
......@@ -143,8 +141,7 @@ static __init void davinci_sffsdr_init(void)
ARRAY_SIZE(davinci_sffsdr_devices));
sffsdr_init_i2c();
davinci_serial_init(&uart_config);
soc_info->emac_pdata->phy_mask = SFFSDR_PHY_MASK;
soc_info->emac_pdata->mdio_max_freq = SFFSDR_MDIO_FREQUENCY;
soc_info->emac_pdata->phy_id = SFFSDR_PHY_ID;
davinci_setup_usb(0, 0); /* We support only peripheral mode. */
/* mux VLYNQ pins */
......
......@@ -23,6 +23,9 @@
#include <linux/ratelimit.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
......@@ -141,10 +144,63 @@ static struct davinci_uart_config serial_config __initconst = {
.enabled_uarts = BIT(1),
};
static const uint32_t keymap[] = {
KEY(0, 0, KEY_NUMERIC_1),
KEY(0, 1, KEY_NUMERIC_2),
KEY(0, 2, KEY_NUMERIC_3),
KEY(0, 3, KEY_FN_F1),
KEY(0, 4, KEY_MENU),
KEY(1, 0, KEY_NUMERIC_4),
KEY(1, 1, KEY_NUMERIC_5),
KEY(1, 2, KEY_NUMERIC_6),
KEY(1, 3, KEY_UP),
KEY(1, 4, KEY_FN_F2),
KEY(2, 0, KEY_NUMERIC_7),
KEY(2, 1, KEY_NUMERIC_8),
KEY(2, 2, KEY_NUMERIC_9),
KEY(2, 3, KEY_LEFT),
KEY(2, 4, KEY_ENTER),
KEY(3, 0, KEY_NUMERIC_STAR),
KEY(3, 1, KEY_NUMERIC_0),
KEY(3, 2, KEY_NUMERIC_POUND),
KEY(3, 3, KEY_DOWN),
KEY(3, 4, KEY_RIGHT),
KEY(4, 0, KEY_FN_F3),
KEY(4, 1, KEY_FN_F4),
KEY(4, 2, KEY_MUTE),
KEY(4, 3, KEY_HOME),
KEY(4, 4, KEY_BACK),
KEY(5, 0, KEY_VOLUMEDOWN),
KEY(5, 1, KEY_VOLUMEUP),
KEY(5, 2, KEY_F1),
KEY(5, 3, KEY_F2),
KEY(5, 4, KEY_F3),
};
static const struct matrix_keymap_data keymap_data = {
.keymap = keymap,
.keymap_size = ARRAY_SIZE(keymap),
};
static struct matrix_keypad_platform_data keypad_config = {
.keymap_data = &keymap_data,
.num_row_gpios = 6,
.num_col_gpios = 5,
.debounce_ms = 0, /* minimum */
.active_low = 0, /* pull up realization */
.no_autorepeat = 0,
};
static struct tnetv107x_device_info evm_device_info __initconst = {
.serial_config = &serial_config,
.mmc_config[1] = &mmc_config, /* controller 1 */
.nand_config[0] = &nand_config, /* chip select 0 */
.keypad_config = &keypad_config,
};
static __init void tnetv107x_evm_board_init(void)
......
......@@ -236,7 +236,7 @@ static int __init clk_disable_unused(void)
if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
continue;
pr_info("Clocks: disable unused %s\n", ck->name);
pr_debug("Clocks: disable unused %s\n", ck->name);
davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
(ck->flags & PSC_SWRSTDISABLE) ?
......@@ -287,6 +287,79 @@ static unsigned long clk_sysclk_recalc(struct clk *clk)
return rate;
}
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
unsigned v;
struct pll_data *pll;
unsigned long input;
unsigned ratio = 0;
/* If this is the PLL base clock, wrong function to call */
if (clk->pll_data)
return -EINVAL;
/* There must be a parent... */
if (WARN_ON(!clk->parent))
return -EINVAL;
/* ... the parent must be a PLL... */
if (WARN_ON(!clk->parent->pll_data))
return -EINVAL;
/* ... and this clock must have a divider. */
if (WARN_ON(!clk->div_reg))
return -EINVAL;
pll = clk->parent->pll_data;
input = clk->parent->rate;
/* If pre-PLL, source clock is before the multiplier and divider(s) */
if (clk->flags & PRE_PLL)
input = pll->input_rate;
if (input > rate) {
/*
* Can afford to provide an output little higher than requested
* only if maximum rate supported by hardware on this sysclk
* is known.
*/
if (clk->maxrate) {
ratio = DIV_ROUND_CLOSEST(input, rate);
if (input / ratio > clk->maxrate)
ratio = 0;
}
if (ratio == 0)
ratio = DIV_ROUND_UP(input, rate);
ratio--;
}
if (ratio > PLLDIV_RATIO_MASK)
return -EINVAL;
do {
v = __raw_readl(pll->base + PLLSTAT);
} while (v & PLLSTAT_GOSTAT);
v = __raw_readl(pll->base + clk->div_reg);
v &= ~PLLDIV_RATIO_MASK;
v |= ratio | PLLDIV_EN;
__raw_writel(v, pll->base + clk->div_reg);
v = __raw_readl(pll->base + PLLCMD);
v |= PLLCMD_GOSET;
__raw_writel(v, pll->base + PLLCMD);
do {
v = __raw_readl(pll->base + PLLSTAT);
} while (v & PLLSTAT_GOSTAT);
return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
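/*
 * Editorial worked example, not part of the patch: with a 300 MHz PLL output
 * feeding a sysclk whose maxrate is 100 MHz, a request for 90 MHz first tries
 * the closest divider, DIV_ROUND_CLOSEST(300, 90) = 3; since 300 / 3 = 100 MHz
 * does not exceed maxrate, the small overshoot is accepted and ratio-- leaves
 * 2 in the PLLDIV RATIO field (the field is conventionally divisor minus one).
 * If maxrate were not known, the code would fall back to
 * DIV_ROUND_UP(300, 90) = 4 and deliver 75 MHz, never exceeding the
 * requested rate.
 */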
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
if (WARN_ON(!clk->parent))
......
......@@ -70,6 +70,9 @@
#include <linux/list.h>
#include <asm/clkdev.h>
#define PLLSTAT_GOSTAT BIT(0)
#define PLLCMD_GOSET BIT(0)
struct pll_data {
u32 phys_base;
void __iomem *base;
......@@ -86,6 +89,7 @@ struct clk {
struct module *owner;
const char *name;
unsigned long rate;
unsigned long maxrate; /* H/W supported max rate */
u8 usecount;
u8 lpsc;
u8 gpsc;
......@@ -118,6 +122,7 @@ struct clk {
int davinci_clk_init(struct clk_lookup *clocks);
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
unsigned int mult, unsigned int postdiv);
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
extern struct platform_device davinci_wdt_device;
extern void davinci_watchdog_reset(struct platform_device *);
......
......@@ -34,6 +34,8 @@
struct davinci_cpufreq {
struct device *dev;
struct clk *armclk;
struct clk *asyncclk;
unsigned long asyncrate;
};
static struct davinci_cpufreq cpufreq;
......@@ -104,15 +106,27 @@ static int davinci_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* if moving to higher frequency, up the voltage beforehand */
if (pdata->set_voltage && freqs.new > freqs.old)
pdata->set_voltage(idx);
if (pdata->set_voltage && freqs.new > freqs.old) {
ret = pdata->set_voltage(idx);
if (ret)
goto out;
}
ret = clk_set_rate(armclk, idx);
if (ret)
goto out;
if (cpufreq.asyncclk) {
ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
if (ret)
goto out;
}
/* if moving to lower freq, lower the voltage after lowering freq */
if (pdata->set_voltage && freqs.new < freqs.old)
pdata->set_voltage(idx);
out:
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
......@@ -185,6 +199,7 @@ static struct cpufreq_driver davinci_driver = {
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
{
struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
struct clk *asyncclk;
if (!pdata)
return -EINVAL;
......@@ -199,6 +214,12 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
return PTR_ERR(cpufreq.armclk);
}
asyncclk = clk_get(cpufreq.dev, "async");
if (!IS_ERR(asyncclk)) {
cpufreq.asyncclk = asyncclk;
cpufreq.asyncrate = clk_get_rate(asyncclk);
}
return cpufreq_register_driver(&davinci_driver);
}
......@@ -206,6 +227,9 @@ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
return cpufreq_unregister_driver(&davinci_driver);
}
......
......@@ -86,6 +86,8 @@ static struct clk pll0_sysclk3 = {
.parent = &pll0_clk,
.flags = CLK_PLL,
.div_reg = PLLDIV3,
.set_rate = davinci_set_sysclk_rate,
.maxrate = 100000000,
};
static struct clk pll0_sysclk4 = {
......@@ -323,12 +325,19 @@ static struct clk lcdc_clk = {
.gpsc = 1,
};
static struct clk mmcsd_clk = {
.name = "mmcsd",
static struct clk mmcsd0_clk = {
.name = "mmcsd0",
.parent = &pll0_sysclk2,
.lpsc = DA8XX_LPSC0_MMC_SD,
};
static struct clk mmcsd1_clk = {
.name = "mmcsd1",
.parent = &pll0_sysclk2,
.lpsc = DA850_LPSC1_MMC_SD1,
.gpsc = 1,
};
static struct clk aemif_clk = {
.name = "aemif",
.parent = &pll0_sysclk3,
......@@ -375,7 +384,8 @@ static struct clk_lookup da850_clks[] = {
CLK("davinci_emac.1", NULL, &emac_clk),
CLK("davinci-mcasp.0", NULL, &mcasp_clk),
CLK("da8xx_lcdc.0", NULL, &lcdc_clk),
CLK("davinci_mmc.0", NULL, &mmcsd_clk),
CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
CLK(NULL, "aemif", &aemif_clk),
CLK(NULL, NULL, NULL),
};
......@@ -572,15 +582,9 @@ const short da850_cpgmac_pins[] __initdata = {
DA850_MII_TXD_2, DA850_MII_TXD_1, DA850_MII_TXD_0, DA850_MII_RXER,
DA850_MII_CRS, DA850_MII_RXCLK, DA850_MII_RXDV, DA850_MII_RXD_3,
DA850_MII_RXD_2, DA850_MII_RXD_1, DA850_MII_RXD_0, DA850_MDIO_CLK,
DA850_MDIO_D,
-1
};
const short da850_rmii_pins[] __initdata = {
DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1,
DA850_RMII_RXER, DA850_RMII_MHZ_50_CLK, DA850_MDIO_CLK,
DA850_MDIO_D,
DA850_MDIO_D, DA850_RMII_TXD_0, DA850_RMII_TXD_1, DA850_RMII_TXEN,
DA850_RMII_CRS_DV, DA850_RMII_RXD_0, DA850_RMII_RXD_1, DA850_RMII_RXER,
DA850_RMII_MHZ_50_CLK,
-1
};
......@@ -607,27 +611,19 @@ const short da850_mmcsd0_pins[] __initdata = {
-1
};
const short da850_nand_pins[] __initdata = {
DA850_EMA_D_7, DA850_EMA_D_6, DA850_EMA_D_5, DA850_EMA_D_4,
DA850_EMA_D_3, DA850_EMA_D_2, DA850_EMA_D_1, DA850_EMA_D_0,
DA850_EMA_A_1, DA850_EMA_A_2, DA850_NEMA_CS_3, DA850_NEMA_CS_4,
DA850_NEMA_WE, DA850_NEMA_OE,
-1
};
const short da850_nor_pins[] __initdata = {
const short da850_emif25_pins[] __initdata = {
DA850_EMA_BA_1, DA850_EMA_CLK, DA850_EMA_WAIT_1, DA850_NEMA_CS_2,
DA850_NEMA_WE, DA850_NEMA_OE, DA850_EMA_D_0, DA850_EMA_D_1,
DA850_EMA_D_2, DA850_EMA_D_3, DA850_EMA_D_4, DA850_EMA_D_5,
DA850_EMA_D_6, DA850_EMA_D_7, DA850_EMA_D_8, DA850_EMA_D_9,
DA850_EMA_D_10, DA850_EMA_D_11, DA850_EMA_D_12, DA850_EMA_D_13,
DA850_EMA_D_14, DA850_EMA_D_15, DA850_EMA_A_0, DA850_EMA_A_1,
DA850_EMA_A_2, DA850_EMA_A_3, DA850_EMA_A_4, DA850_EMA_A_5,
DA850_EMA_A_6, DA850_EMA_A_7, DA850_EMA_A_8, DA850_EMA_A_9,
DA850_EMA_A_10, DA850_EMA_A_11, DA850_EMA_A_12, DA850_EMA_A_13,
DA850_EMA_A_14, DA850_EMA_A_15, DA850_EMA_A_16, DA850_EMA_A_17,
DA850_EMA_A_18, DA850_EMA_A_19, DA850_EMA_A_20, DA850_EMA_A_21,
DA850_EMA_A_22, DA850_EMA_A_23,
DA850_NEMA_CS_3, DA850_NEMA_CS_4, DA850_NEMA_WE, DA850_NEMA_OE,
DA850_EMA_D_0, DA850_EMA_D_1, DA850_EMA_D_2, DA850_EMA_D_3,
DA850_EMA_D_4, DA850_EMA_D_5, DA850_EMA_D_6, DA850_EMA_D_7,
DA850_EMA_D_8, DA850_EMA_D_9, DA850_EMA_D_10, DA850_EMA_D_11,
DA850_EMA_D_12, DA850_EMA_D_13, DA850_EMA_D_14, DA850_EMA_D_15,
DA850_EMA_A_0, DA850_EMA_A_1, DA850_EMA_A_2, DA850_EMA_A_3,
DA850_EMA_A_4, DA850_EMA_A_5, DA850_EMA_A_6, DA850_EMA_A_7,
DA850_EMA_A_8, DA850_EMA_A_9, DA850_EMA_A_10, DA850_EMA_A_11,
DA850_EMA_A_12, DA850_EMA_A_13, DA850_EMA_A_14, DA850_EMA_A_15,
DA850_EMA_A_16, DA850_EMA_A_17, DA850_EMA_A_18, DA850_EMA_A_19,
DA850_EMA_A_20, DA850_EMA_A_21, DA850_EMA_A_22, DA850_EMA_A_23,
-1
};
......@@ -851,7 +847,7 @@ static const struct da850_opp da850_opp_300 = {
.prediv = 1,
.mult = 25,
.postdiv = 2,
.cvdd_min = 1140000,
.cvdd_min = 1200000,
.cvdd_max = 1320000,
};
......@@ -860,7 +856,7 @@ static const struct da850_opp da850_opp_200 = {
.prediv = 1,
.mult = 25,
.postdiv = 3,
.cvdd_min = 1050000,
.cvdd_min = 1100000,
.cvdd_max = 1160000,
};
......@@ -869,7 +865,7 @@ static const struct da850_opp da850_opp_96 = {
.prediv = 1,
.mult = 20,
.postdiv = 5,
.cvdd_min = 950000,
.cvdd_min = 1000000,
.cvdd_max = 1050000,
};
......@@ -929,10 +925,16 @@ static struct platform_device da850_cpufreq_device = {
.dev = {
.platform_data = &cpufreq_info,
},
.id = -1,
};
int __init da850_register_cpufreq(void)
int __init da850_register_cpufreq(char *async_clk)
{
/* cpufreq driver can help keep an "async" clock constant */
if (async_clk)
clk_add_alias("async", da850_cpufreq_device.name,
async_clk, NULL);
return platform_device_register(&da850_cpufreq_device);
}
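/*
 * Editorial note, not part of the patch: the alias registered above is what
 * the davinci-cpufreq changes elsewhere in this series consume.  After a
 * board calls, for example, da850_register_cpufreq("pll0_sysclk3"), the
 * cpufreq driver picks the clock up with
 *
 *	asyncclk = clk_get(cpufreq.dev, "async");
 *	cpufreq.asyncrate = clk_get_rate(asyncclk);
 *
 * and re-applies asyncrate after every CPU frequency transition, keeping
 * peripherals on the async domain at a constant rate while the PLL moves.
 */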
......@@ -983,7 +985,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
return 0;
}
#else
int __init da850_register_cpufreq(void)
int __init da850_register_cpufreq(char *async_clk)
{
return 0;
}
......
......@@ -24,6 +24,7 @@
#include "clock.h"
#define DA8XX_TPCC_BASE 0x01c00000
#define DA850_MMCSD1_BASE 0x01e1b000
#define DA850_TPCC1_BASE 0x01e30000
#define DA8XX_TPTC0_BASE 0x01c08000
#define DA8XX_TPTC1_BASE 0x01c08400
......@@ -41,7 +42,6 @@
#define DA8XX_EMAC_CTRL_REG_OFFSET 0x3000
#define DA8XX_EMAC_MOD_REG_OFFSET 0x2000
#define DA8XX_EMAC_RAM_OFFSET 0x0000
#define DA8XX_MDIO_REG_OFFSET 0x4000
#define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K
void __iomem *da8xx_syscfg0_base;
......@@ -351,7 +351,7 @@ int __init da8xx_register_watchdog(void)
static struct resource da8xx_emac_resources[] = {
{
.start = DA8XX_EMAC_CPPI_PORT_BASE,
.end = DA8XX_EMAC_CPPI_PORT_BASE + 0x5000 - 1,
.end = DA8XX_EMAC_CPPI_PORT_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
......@@ -380,7 +380,6 @@ struct emac_platform_data da8xx_emac_pdata = {
.ctrl_reg_offset = DA8XX_EMAC_CTRL_REG_OFFSET,
.ctrl_mod_reg_offset = DA8XX_EMAC_MOD_REG_OFFSET,
.ctrl_ram_offset = DA8XX_EMAC_RAM_OFFSET,
.mdio_reg_offset = DA8XX_MDIO_REG_OFFSET,
.ctrl_ram_size = DA8XX_EMAC_CTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
......@@ -395,9 +394,34 @@ static struct platform_device da8xx_emac_device = {
.resource = da8xx_emac_resources,
};
static struct resource da8xx_mdio_resources[] = {
{
.start = DA8XX_EMAC_MDIO_BASE,
.end = DA8XX_EMAC_MDIO_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device da8xx_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(da8xx_mdio_resources),
.resource = da8xx_mdio_resources,
};
int __init da8xx_register_emac(void)
{
return platform_device_register(&da8xx_emac_device);
int ret;
ret = platform_device_register(&da8xx_mdio_device);
if (ret < 0)
return ret;
ret = platform_device_register(&da8xx_emac_device);
if (ret < 0)
return ret;
ret = clk_add_alias(NULL, dev_name(&da8xx_mdio_device.dev),
NULL, &da8xx_emac_device.dev);
return ret;
}
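/*
 * Editorial note, not part of the patch: the EMAC and the new stand-alone
 * MDIO platform device share one functional clock, so the EMAC's clock is
 * re-registered above under the MDIO device's name.  A lookup issued by the
 * davinci_mdio driver against its own device then resolves to that same
 * struct clk, roughly (connection id assumed):
 *
 *	struct clk *clk = clk_get(&da8xx_mdio_device.dev, NULL);
 *
 * The same register-mdio/register-emac/clk_add_alias pattern is repeated
 * below for dm365, dm644x and dm646x.
 */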
static struct resource da830_mcasp1_resources[] = {
......@@ -566,6 +590,44 @@ int __init da8xx_register_mmcsd0(struct davinci_mmc_config *config)
return platform_device_register(&da8xx_mmcsd0_device);
}
#ifdef CONFIG_ARCH_DAVINCI_DA850
static struct resource da850_mmcsd1_resources[] = {
{ /* registers */
.start = DA850_MMCSD1_BASE,
.end = DA850_MMCSD1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{ /* interrupt */
.start = IRQ_DA850_MMCSDINT0_1,
.end = IRQ_DA850_MMCSDINT0_1,
.flags = IORESOURCE_IRQ,
},
{ /* DMA RX */
.start = EDMA_CTLR_CHAN(1, 28),
.end = EDMA_CTLR_CHAN(1, 28),
.flags = IORESOURCE_DMA,
},
{ /* DMA TX */
.start = EDMA_CTLR_CHAN(1, 29),
.end = EDMA_CTLR_CHAN(1, 29),
.flags = IORESOURCE_DMA,
},
};
static struct platform_device da850_mmcsd1_device = {
.name = "davinci_mmc",
.id = 1,
.num_resources = ARRAY_SIZE(da850_mmcsd1_resources),
.resource = da850_mmcsd1_resources,
};
int __init da850_register_mmcsd1(struct davinci_mmc_config *config)
{
da850_mmcsd1_device.dev.platform_data = config;
return platform_device_register(&da850_mmcsd1_device);
}
#endif
static struct resource da8xx_rtc_resources[] = {
{
.start = DA8XX_RTC_BASE,
......
......@@ -31,8 +31,10 @@
#define TNETV107X_TPTC0_BASE 0x01c10000
#define TNETV107X_TPTC1_BASE 0x01c10400
#define TNETV107X_WDOG_BASE 0x08086700
#define TNETV107X_TSC_BASE 0x08088500
#define TNETV107X_SDIO0_BASE 0x08088700
#define TNETV107X_SDIO1_BASE 0x08088800
#define TNETV107X_KEYPAD_BASE 0x08088a00
#define TNETV107X_ASYNC_EMIF_CNTRL_BASE 0x08200000
#define TNETV107X_ASYNC_EMIF_DATA_CE0_BASE 0x30000000
#define TNETV107X_ASYNC_EMIF_DATA_CE1_BASE 0x40000000
......@@ -298,12 +300,55 @@ static int __init nand_init(int chipsel, struct davinci_nand_pdata *data)
return platform_device_register(pdev);
}
static struct resource keypad_resources[] = {
{
.start = TNETV107X_KEYPAD_BASE,
.end = TNETV107X_KEYPAD_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TNETV107X_KEYPAD,
.flags = IORESOURCE_IRQ,
.name = "press",
},
{
.start = IRQ_TNETV107X_KEYPAD_FREE,
.flags = IORESOURCE_IRQ,
.name = "release",
},
};
static struct platform_device keypad_device = {
.name = "tnetv107x-keypad",
.num_resources = ARRAY_SIZE(keypad_resources),
.resource = keypad_resources,
};
static struct resource tsc_resources[] = {
{
.start = TNETV107X_TSC_BASE,
.end = TNETV107X_TSC_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_TNETV107X_TSC,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tsc_device = {
.name = "tnetv107x-ts",
.num_resources = ARRAY_SIZE(tsc_resources),
.resource = tsc_resources,
};
void __init tnetv107x_devices_init(struct tnetv107x_device_info *info)
{
int i;
platform_device_register(&edma_device);
platform_device_register(&tnetv107x_wdt_device);
platform_device_register(&tsc_device);
if (info->serial_config)
davinci_serial_init(info->serial_config);
......@@ -317,4 +362,9 @@ void __init tnetv107x_devices_init(struct tnetv107x_device_info *info)
for (i = 0; i < 4; i++)
if (info->nand_config[i])
nand_init(i, info->nand_config[i]);
if (info->keypad_config) {
keypad_device.dev.platform_data = info->keypad_config;
platform_device_register(&keypad_device);
}
}
......@@ -213,7 +213,7 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
IO_ADDRESS(DAVINCI_SYSTEM_MODULE_BASE + 0x7c);
/* Configure pull down control */
__raw_writel((__raw_readl(pupdctl1) & ~0x400),
__raw_writel((__raw_readl(pupdctl1) & ~0xfc0),
pupdctl1);
mmcsd1_resources[0].start = DM365_MMCSD1_BASE;
......
......@@ -691,7 +691,6 @@ static struct emac_platform_data dm365_emac_pdata = {
.ctrl_reg_offset = DM365_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM365_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM365_EMAC_CNTRL_RAM_OFFSET,
.mdio_reg_offset = DM365_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM365_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
......@@ -699,7 +698,7 @@ static struct emac_platform_data dm365_emac_pdata = {
static struct resource dm365_emac_resources[] = {
{
.start = DM365_EMAC_BASE,
.end = DM365_EMAC_BASE + 0x47ff,
.end = DM365_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
......@@ -734,6 +733,21 @@ static struct platform_device dm365_emac_device = {
.resource = dm365_emac_resources,
};
static struct resource dm365_mdio_resources[] = {
{
.start = DM365_EMAC_MDIO_BASE,
.end = DM365_EMAC_MDIO_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm365_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(dm365_mdio_resources),
.resource = dm365_mdio_resources,
};
static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = {
[IRQ_VDINT0] = 2,
[IRQ_VDINT1] = 6,
......@@ -1219,7 +1233,12 @@ static int __init dm365_init_devices(void)
davinci_cfg_reg(DM365_INT_EDMA_CC);
platform_device_register(&dm365_edma_device);
platform_device_register(&dm365_mdio_device);
platform_device_register(&dm365_emac_device);
clk_add_alias(NULL, dev_name(&dm365_mdio_device.dev),
NULL, &dm365_emac_device.dev);
/* Add isif clock alias */
clk_add_alias("master", dm365_isif_dev.name, "vpss_master", NULL);
platform_device_register(&dm365_vpss_device);
......
......@@ -322,7 +322,6 @@ static struct emac_platform_data dm644x_emac_pdata = {
.ctrl_reg_offset = DM644X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM644X_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM644X_EMAC_CNTRL_RAM_OFFSET,
.mdio_reg_offset = DM644X_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM644X_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_1,
};
......@@ -330,7 +329,7 @@ static struct emac_platform_data dm644x_emac_pdata = {
static struct resource dm644x_emac_resources[] = {
{
.start = DM644X_EMAC_BASE,
.end = DM644X_EMAC_BASE + 0x47ff,
.end = DM644X_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
......@@ -350,6 +349,21 @@ static struct platform_device dm644x_emac_device = {
.resource = dm644x_emac_resources,
};
static struct resource dm644x_mdio_resources[] = {
{
.start = DM644X_EMAC_MDIO_BASE,
.end = DM644X_EMAC_MDIO_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm644x_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(dm644x_mdio_resources),
.resource = dm644x_mdio_resources,
};
/*
* Device specific mux setup
*
......@@ -776,7 +790,12 @@ static int __init dm644x_init_devices(void)
clk_add_alias("master", dm644x_ccdc_dev.name, "vpss_master", NULL);
clk_add_alias("slave", dm644x_ccdc_dev.name, "vpss_slave", NULL);
platform_device_register(&dm644x_edma_device);
platform_device_register(&dm644x_mdio_device);
platform_device_register(&dm644x_emac_device);
clk_add_alias(NULL, dev_name(&dm644x_mdio_device.dev),
NULL, &dm644x_emac_device.dev);
platform_device_register(&dm644x_vpss_device);
platform_device_register(&dm644x_ccdc_dev);
platform_device_register(&vpfe_capture_dev);
......
......@@ -358,7 +358,6 @@ static struct emac_platform_data dm646x_emac_pdata = {
.ctrl_reg_offset = DM646X_EMAC_CNTRL_OFFSET,
.ctrl_mod_reg_offset = DM646X_EMAC_CNTRL_MOD_OFFSET,
.ctrl_ram_offset = DM646X_EMAC_CNTRL_RAM_OFFSET,
.mdio_reg_offset = DM646X_EMAC_MDIO_OFFSET,
.ctrl_ram_size = DM646X_EMAC_CNTRL_RAM_SIZE,
.version = EMAC_VERSION_2,
};
......@@ -366,7 +365,7 @@ static struct emac_platform_data dm646x_emac_pdata = {
static struct resource dm646x_emac_resources[] = {
{
.start = DM646X_EMAC_BASE,
.end = DM646X_EMAC_BASE + 0x47ff,
.end = DM646X_EMAC_BASE + SZ_16K - 1,
.flags = IORESOURCE_MEM,
},
{
......@@ -401,6 +400,21 @@ static struct platform_device dm646x_emac_device = {
.resource = dm646x_emac_resources,
};
static struct resource dm646x_mdio_resources[] = {
{
.start = DM646X_EMAC_MDIO_BASE,
.end = DM646X_EMAC_MDIO_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device dm646x_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(dm646x_mdio_resources),
.resource = dm646x_mdio_resources,
};
/*
* Device specific mux setup
*
......@@ -896,7 +910,11 @@ static int __init dm646x_init_devices(void)
if (!cpu_is_davinci_dm646x())
return 0;
platform_device_register(&dm646x_mdio_device);
platform_device_register(&dm646x_emac_device);
clk_add_alias(NULL, dev_name(&dm646x_mdio_device.dev),
NULL, &dm646x_emac_device.dev);
return 0;
}
postcore_initcall(dm646x_init_devices);
......@@ -354,10 +354,12 @@ static int irq2ctlr(int irq)
static irqreturn_t dma_irq_handler(int irq, void *data)
{
int i;
unsigned ctlr;
int ctlr;
unsigned int cnt = 0;
ctlr = irq2ctlr(irq);
if (ctlr < 0)
return IRQ_NONE;
dev_dbg(data, "dma_irq_handler\n");
......@@ -408,10 +410,12 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
int i;
unsigned ctlr;
int ctlr;
unsigned int cnt = 0;
ctlr = irq2ctlr(irq);
if (ctlr < 0)
return IRQ_NONE;
dev_dbg(data, "dma_ccerr_handler\n");
......
/*
* TI DaVinci AEMIF support
*
* Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _MACH_DAVINCI_AEMIF_H
#define _MACH_DAVINCI_AEMIF_H
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
#define A1CR_OFFSET 0x10
#define ACR_ASIZE_MASK 0x3
#define ACR_EW_MASK BIT(30)
#define ACR_SS_MASK BIT(31)
/* All timings in nanoseconds */
struct davinci_aemif_timing {
u8 wsetup;
u8 wstrobe;
u8 whold;
u8 rsetup;
u8 rstrobe;
u8 rhold;
u8 ta;
};
int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
void __iomem *base, unsigned cs);
#endif
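For illustration, a board file could describe its flash timings with the structure above and hand them to the NAND driver through the new davinci_nand_pdata timing hook; the numbers and the example_* names below are hypothetical (loosely based on the old DM6446 EVM values), not taken from any supported board:
/* Hypothetical board-side AEMIF timings, in nanoseconds (from the flash datasheet) */
static struct davinci_aemif_timing example_nand_timing = {
	.wsetup		= 10,
	.wstrobe	= 40,
	.whold		= 10,
	.rsetup		= 10,
	.rstrobe	= 60,
	.rhold		= 10,
	.ta		= 40,
};
static struct davinci_nand_pdata example_nand_pdata = {
	/* ... ECC mode, chip-select masks, etc. as before ... */
	.timing		= &example_nand_timing,
};
The NAND probe then programs these values into the chip-select's AEMIF registers via davinci_aemif_setup_timing(), as shown in the nand_davinci_probe() change further down.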
......@@ -76,9 +76,10 @@ int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
int da8xx_register_emac(void);
int da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata);
int da8xx_register_mmcsd0(struct davinci_mmc_config *config);
int da850_register_mmcsd1(struct davinci_mmc_config *config);
void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata);
int da8xx_register_rtc(void);
int da850_register_cpufreq(void);
int da850_register_cpufreq(char *async_clk);
int da8xx_register_cpuidle(void);
void __iomem * __init da8xx_get_mem_ctlr(void);
int da850_register_pm(struct platform_device *pdev);
......@@ -121,11 +122,9 @@ extern const short da850_uart2_pins[];
extern const short da850_i2c0_pins[];
extern const short da850_i2c1_pins[];
extern const short da850_cpgmac_pins[];
extern const short da850_rmii_pins[];
extern const short da850_mcasp_pins[];
extern const short da850_lcdcntl_pins[];
extern const short da850_mmcsd0_pins[];
extern const short da850_nand_pins[];
extern const short da850_nor_pins[];
extern const short da850_emif25_pins[];
#endif /* __ASM_ARCH_DAVINCI_DA8XX_H */
......@@ -21,10 +21,10 @@
#include <media/davinci/vpfe_capture.h>
#define DM365_EMAC_BASE (0x01D07000)
#define DM365_EMAC_MDIO_BASE (DM365_EMAC_BASE + 0x4000)
#define DM365_EMAC_CNTRL_OFFSET (0x0000)
#define DM365_EMAC_CNTRL_MOD_OFFSET (0x3000)
#define DM365_EMAC_CNTRL_RAM_OFFSET (0x1000)
#define DM365_EMAC_MDIO_OFFSET (0x4000)
#define DM365_EMAC_CNTRL_RAM_SIZE (0x2000)
/* Base of key scan register bank */
......
......@@ -28,10 +28,10 @@
#include <media/davinci/vpfe_capture.h>
#define DM644X_EMAC_BASE (0x01C80000)
#define DM644X_EMAC_MDIO_BASE (DM644X_EMAC_BASE + 0x4000)
#define DM644X_EMAC_CNTRL_OFFSET (0x0000)
#define DM644X_EMAC_CNTRL_MOD_OFFSET (0x1000)
#define DM644X_EMAC_CNTRL_RAM_OFFSET (0x2000)
#define DM644X_EMAC_MDIO_OFFSET (0x4000)
#define DM644X_EMAC_CNTRL_RAM_SIZE (0x2000)
#define DM644X_ASYNC_EMIF_CONTROL_BASE 0x01E00000
......
......@@ -19,10 +19,10 @@
#include <linux/davinci_emac.h>
#define DM646X_EMAC_BASE (0x01C80000)
#define DM646X_EMAC_MDIO_BASE (DM646X_EMAC_BASE + 0x4000)
#define DM646X_EMAC_CNTRL_OFFSET (0x0000)
#define DM646X_EMAC_CNTRL_MOD_OFFSET (0x1000)
#define DM646X_EMAC_CNTRL_RAM_OFFSET (0x2000)
#define DM646X_EMAC_MDIO_OFFSET (0x4000)
#define DM646X_EMAC_CNTRL_RAM_SIZE (0x2000)
#define DM646X_ASYNC_EMIF_CONTROL_BASE 0x20008000
......
......@@ -30,9 +30,6 @@
#include <linux/mtd/nand.h>
#define NRCSR_OFFSET 0x00
#define AWCCR_OFFSET 0x04
#define A1CR_OFFSET 0x10
#define NANDFCR_OFFSET 0x60
#define NANDFSR_OFFSET 0x64
#define NANDF1ECC_OFFSET 0x70
......@@ -83,6 +80,9 @@ struct davinci_nand_pdata { /* platform_data */
/* Main and mirror bbt descriptor overrides */
struct nand_bbt_descr *bbt_td;
struct nand_bbt_descr *bbt_md;
/* Access timings */
struct davinci_aemif_timing *timing;
};
#endif /* __ARCH_ARM_DAVINCI_NAND_H */
......@@ -172,6 +172,7 @@
#define DA8XX_LPSC1_UART2 13
#define DA8XX_LPSC1_LCDC 16
#define DA8XX_LPSC1_PWM 17
#define DA850_LPSC1_MMC_SD1 18
#define DA8XX_LPSC1_ECAP 20
#define DA830_LPSC1_EQEP 21
#define DA850_LPSC1_TPTC2 21
......
......@@ -33,6 +33,8 @@
#ifndef __ASSEMBLY__
#include <linux/serial_8250.h>
#include <linux/input/matrix_keypad.h>
#include <mach/mmc.h>
#include <mach/nand.h>
#include <mach/serial.h>
......@@ -41,6 +43,7 @@ struct tnetv107x_device_info {
struct davinci_uart_config *serial_config;
struct davinci_mmc_config *mmc_config[2]; /* 2 controllers */
struct davinci_nand_pdata *nand_config[4]; /* 4 chipsels */
struct matrix_keypad_platform_data *keypad_config;
};
extern struct platform_device tnetv107x_wdt_device;
......
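A rough, hypothetical sketch of how a tnetv107x board file might populate the new keypad_config member using the generic matrix-keypad structures included above (keymap contents and example_* names are invented for illustration):
static const uint32_t example_keymap[] = {
	KEY(0, 0, KEY_1),
	KEY(0, 1, KEY_2),
	/* ... one entry per populated row/column ... */
};
static const struct matrix_keymap_data example_keymap_data = {
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};
static struct matrix_keypad_platform_data example_keypad_config = {
	.keymap_data	= &example_keymap_data,
	.num_row_gpios	= 9,		/* the controller scans a 9x9 matrix */
	.num_col_gpios	= 9,
	.debounce_ms	= 10,
};
The board's tnetv107x_device_info would then point keypad_config at this structure before calling the SoC setup code.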
......@@ -88,6 +88,8 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
/* DA8xx boards */
DEBUG_LL_DA8XX(davinci_da830_evm, 2);
DEBUG_LL_DA8XX(davinci_da850_evm, 2);
DEBUG_LL_DA8XX(mityomapl138, 1);
DEBUG_LL_DA8XX(omapl138_hawkboard, 2);
/* TNETV107x boards */
DEBUG_LL_TNETV107X(tnetv107x, 1);
......
......@@ -104,7 +104,7 @@ static u32 pll_ext_freq[] = {
};
/* PSC control registers */
static u32 psc_regs[] __initconst = { TNETV107X_PSC_BASE };
static u32 psc_regs[] = { TNETV107X_PSC_BASE };
/* Host map for interrupt controller */
static u32 intc_host_map[] = { 0x01010000, 0x01010101, -1 };
......@@ -581,7 +581,14 @@ static struct davinci_id ids[] = {
.part_no = 0xb8a1,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_TNETV107X,
.name = "tnetv107x rev1.0",
.name = "tnetv107x rev 1.0",
},
{
.variant = 0x1,
.part_no = 0xb8a1,
.manufacturer = 0x017,
.cpu_id = DAVINCI_CPU_ID_TNETV107X,
.name = "tnetv107x rev 1.1/1.2",
},
};
......
......@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c/pca953x.h>
......@@ -38,19 +39,37 @@
#include "mux.h"
#define AM35XX_EVM_PHY_MASK (0xF)
#define AM35XX_EVM_MDIO_FREQUENCY (1000000)
static struct mdio_platform_data am3517_evm_mdio_pdata = {
.bus_freq = AM35XX_EVM_MDIO_FREQUENCY,
};
static struct resource am3517_mdio_resources[] = {
{
.start = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET,
.end = AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET +
SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device am3517_mdio_device = {
.name = "davinci_mdio",
.id = 0,
.num_resources = ARRAY_SIZE(am3517_mdio_resources),
.resource = am3517_mdio_resources,
.dev.platform_data = &am3517_evm_mdio_pdata,
};
static struct emac_platform_data am3517_evm_emac_pdata = {
.phy_mask = AM35XX_EVM_PHY_MASK,
.mdio_max_freq = AM35XX_EVM_MDIO_FREQUENCY,
.rmii_en = 1,
};
static struct resource am3517_emac_resources[] = {
{
.start = AM35XX_IPSS_EMAC_BASE,
.end = AM35XX_IPSS_EMAC_BASE + 0x3FFFF,
.end = AM35XX_IPSS_EMAC_BASE + 0x2FFFF,
.flags = IORESOURCE_MEM,
},
{
......@@ -113,7 +132,6 @@ void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
pdata->ctrl_reg_offset = AM35XX_EMAC_CNTRL_OFFSET;
pdata->ctrl_mod_reg_offset = AM35XX_EMAC_CNTRL_MOD_OFFSET;
pdata->ctrl_ram_offset = AM35XX_EMAC_CNTRL_RAM_OFFSET;
pdata->mdio_reg_offset = AM35XX_EMAC_MDIO_OFFSET;
pdata->ctrl_ram_size = AM35XX_EMAC_CNTRL_RAM_SIZE;
pdata->version = EMAC_VERSION_2;
pdata->hw_ram_addr = AM35XX_EMAC_HW_RAM_ADDR;
......@@ -121,6 +139,9 @@ void am3517_evm_ethernet_init(struct emac_platform_data *pdata)
pdata->interrupt_disable = am3517_disable_ethernet_int;
am3517_emac_device.dev.platform_data = pdata;
platform_device_register(&am3517_emac_device);
platform_device_register(&am3517_mdio_device);
clk_add_alias(NULL, dev_name(&am3517_mdio_device.dev),
NULL, &am3517_emac_device.dev);
regval = omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET);
regval = regval & (~(AM35XX_CPGMACSS_SW_RST));
......
......@@ -443,6 +443,15 @@ config KEYBOARD_OMAP4
To compile this driver as a module, choose M here: the
module will be called omap4-keypad.
config KEYBOARD_TNETV107X
tristate "TI TNETV107X keypad support"
depends on ARCH_DAVINCI_TNETV107X
help
Say Y here if you want to use the TNETV107X keypad.
To compile this driver as a module, choose M here: the
module will be called tnetv107x-keypad.
config KEYBOARD_TWL4030
tristate "TI TWL4030/TWL5030/TPS659x0 keypad support"
depends on TWL4030_CORE
......
......@@ -40,6 +40,7 @@ obj-$(CONFIG_KEYBOARD_SH_KEYSC) += sh_keysc.o
obj-$(CONFIG_KEYBOARD_STMPE) += stmpe-keypad.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_SUNKBD) += sunkbd.o
obj-$(CONFIG_KEYBOARD_TNETV107X) += tnetv107x-keypad.o
obj-$(CONFIG_KEYBOARD_TWL4030) += twl4030_keypad.o
obj-$(CONFIG_KEYBOARD_XTKBD) += xtkbd.o
obj-$(CONFIG_KEYBOARD_W90P910) += w90p910_keypad.o
/*
* Texas Instruments TNETV107X Keypad Driver
*
* Copyright (C) 2010 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/input/matrix_keypad.h>
#define BITS(x) (BIT(x) - 1)
#define KEYPAD_ROWS 9
#define KEYPAD_COLS 9
#define DEBOUNCE_MIN 0x400ul
#define DEBOUNCE_MAX 0x3ffffffful
struct keypad_regs {
u32 rev;
u32 mode;
u32 mask;
u32 pol;
u32 dclock;
u32 rclock;
u32 stable_cnt;
u32 in_en;
u32 out;
u32 out_en;
u32 in;
u32 lock;
u32 pres[3];
};
#define keypad_read(kp, reg) __raw_readl(&(kp)->regs->reg)
#define keypad_write(kp, reg, val) __raw_writel(val, &(kp)->regs->reg)
struct keypad_data {
struct input_dev *input_dev;
struct resource *res;
struct keypad_regs __iomem *regs;
struct clk *clk;
struct device *dev;
spinlock_t lock;
u32 irq_press;
u32 irq_release;
int rows, cols, row_shift;
int debounce_ms, active_low;
u32 prev_keys[3];
unsigned short keycodes[];
};
static irqreturn_t keypad_irq(int irq, void *data)
{
struct keypad_data *kp = data;
int i, bit, val, row, col, code;
unsigned long flags;
u32 curr_keys[3];
u32 change;
spin_lock_irqsave(&kp->lock, flags);
memset(curr_keys, 0, sizeof(curr_keys));
if (irq == kp->irq_press)
for (i = 0; i < 3; i++)
curr_keys[i] = keypad_read(kp, pres[i]);
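/* each 32-bit 'pres' word holds one key state per bit; walk the changed bits and map the global bit index to a row/column in the 9x9 matrix */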
for (i = 0; i < 3; i++) {
change = curr_keys[i] ^ kp->prev_keys[i];
while (change) {
bit = fls(change) - 1;
change ^= BIT(bit);
val = curr_keys[i] & BIT(bit);
bit += i * 32;
row = bit / KEYPAD_COLS;
col = bit % KEYPAD_COLS;
code = MATRIX_SCAN_CODE(row, col, kp->row_shift);
input_event(kp->input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(kp->input_dev, kp->keycodes[code],
val);
}
}
input_sync(kp->input_dev);
memcpy(kp->prev_keys, curr_keys, sizeof(curr_keys));
if (irq == kp->irq_press)
keypad_write(kp, lock, 0); /* Allow hardware updates */
spin_unlock_irqrestore(&kp->lock, flags);
return IRQ_HANDLED;
}
static int keypad_start(struct input_dev *dev)
{
struct keypad_data *kp = input_get_drvdata(dev);
unsigned long mask, debounce, clk_rate_khz;
unsigned long flags;
clk_enable(kp->clk);
clk_rate_khz = clk_get_rate(kp->clk) / 1000;
spin_lock_irqsave(&kp->lock, flags);
/* Initialize device registers */
keypad_write(kp, mode, 0);
mask = BITS(kp->rows) << KEYPAD_COLS;
mask |= BITS(kp->cols);
keypad_write(kp, mask, ~mask);
keypad_write(kp, pol, kp->active_low ? 0 : 0x3ffff);
keypad_write(kp, stable_cnt, 3);
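/* convert the debounce time from milliseconds to functional-clock cycles and clamp it to the counter range the hardware supports */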
debounce = kp->debounce_ms * clk_rate_khz;
debounce = clamp(debounce, DEBOUNCE_MIN, DEBOUNCE_MAX);
keypad_write(kp, dclock, debounce);
keypad_write(kp, rclock, 4 * debounce);
keypad_write(kp, in_en, 1);
spin_unlock_irqrestore(&kp->lock, flags);
return 0;
}
static void keypad_stop(struct input_dev *dev)
{
struct keypad_data *kp = input_get_drvdata(dev);
synchronize_irq(kp->irq_press);
synchronize_irq(kp->irq_release);
clk_disable(kp->clk);
}
static int __devinit keypad_probe(struct platform_device *pdev)
{
const struct matrix_keypad_platform_data *pdata;
const struct matrix_keymap_data *keymap_data;
struct device *dev = &pdev->dev;
struct keypad_data *kp;
int error = 0, sz, row_shift;
u32 rev = 0;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(dev, "cannot find device data\n");
return -EINVAL;
}
keymap_data = pdata->keymap_data;
if (!keymap_data) {
dev_err(dev, "cannot find keymap data\n");
return -EINVAL;
}
row_shift = get_count_order(pdata->num_col_gpios);
sz = offsetof(struct keypad_data, keycodes);
sz += (pdata->num_row_gpios << row_shift) * sizeof(kp->keycodes[0]);
kp = kzalloc(sz, GFP_KERNEL);
if (!kp) {
dev_err(dev, "cannot allocate device info\n");
return -ENOMEM;
}
kp->dev = dev;
kp->rows = pdata->num_row_gpios;
kp->cols = pdata->num_col_gpios;
kp->row_shift = row_shift;
platform_set_drvdata(pdev, kp);
spin_lock_init(&kp->lock);
kp->irq_press = platform_get_irq_byname(pdev, "press");
kp->irq_release = platform_get_irq_byname(pdev, "release");
if (kp->irq_press < 0 || kp->irq_release < 0) {
dev_err(dev, "cannot determine device interrupts\n");
error = -ENODEV;
goto error_res;
}
kp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!kp->res) {
dev_err(dev, "cannot determine register area\n");
error = -ENODEV;
goto error_res;
}
if (!request_mem_region(kp->res->start, resource_size(kp->res),
pdev->name)) {
dev_err(dev, "cannot claim register memory\n");
kp->res = NULL;
error = -EINVAL;
goto error_res;
}
kp->regs = ioremap(kp->res->start, resource_size(kp->res));
if (!kp->regs) {
dev_err(dev, "cannot map register memory\n");
error = -ENOMEM;
goto error_map;
}
kp->clk = clk_get(dev, NULL);
if (IS_ERR(kp->clk)) {
dev_err(dev, "cannot claim device clock\n");
error = PTR_ERR(kp->clk);
goto error_clk;
}
error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad press key irq\n");
goto error_irq_press;
}
error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad release key irq\n");
goto error_irq_release;
}
kp->input_dev = input_allocate_device();
if (!kp->input_dev) {
dev_err(dev, "cannot allocate input device\n");
error = -ENOMEM;
goto error_input;
}
input_set_drvdata(kp->input_dev, kp);
kp->input_dev->name = pdev->name;
kp->input_dev->dev.parent = &pdev->dev;
kp->input_dev->open = keypad_start;
kp->input_dev->close = keypad_stop;
kp->input_dev->evbit[0] = BIT_MASK(EV_KEY);
if (!pdata->no_autorepeat)
kp->input_dev->evbit[0] |= BIT_MASK(EV_REP);
clk_enable(kp->clk);
rev = keypad_read(kp, rev);
kp->input_dev->id.bustype = BUS_HOST;
kp->input_dev->id.product = ((rev >> 8) & 0x07);
kp->input_dev->id.version = ((rev >> 16) & 0xfff);
clk_disable(kp->clk);
kp->input_dev->keycode = kp->keycodes;
kp->input_dev->keycodesize = sizeof(kp->keycodes[0]);
kp->input_dev->keycodemax = kp->rows << kp->row_shift;
matrix_keypad_build_keymap(keymap_data, kp->row_shift, kp->keycodes,
kp->input_dev->keybit);
input_set_capability(kp->input_dev, EV_MSC, MSC_SCAN);
error = input_register_device(kp->input_dev);
if (error < 0) {
dev_err(dev, "Could not register input device\n");
goto error_reg;
}
return 0;
error_reg:
input_free_device(kp->input_dev);
error_input:
free_irq(kp->irq_release, kp);
error_irq_release:
free_irq(kp->irq_press, kp);
error_irq_press:
clk_put(kp->clk);
error_clk:
iounmap(kp->regs);
error_map:
release_mem_region(kp->res->start, resource_size(kp->res));
error_res:
platform_set_drvdata(pdev, NULL);
kfree(kp);
return error;
}
static int __devexit keypad_remove(struct platform_device *pdev)
{
struct keypad_data *kp = platform_get_drvdata(pdev);
free_irq(kp->irq_press, kp);
free_irq(kp->irq_release, kp);
input_unregister_device(kp->input_dev);
clk_put(kp->clk);
iounmap(kp->regs);
release_mem_region(kp->res->start, resource_size(kp->res));
platform_set_drvdata(pdev, NULL);
kfree(kp);
return 0;
}
static struct platform_driver keypad_driver = {
.probe = keypad_probe,
.remove = __devexit_p(keypad_remove),
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
static int __init keypad_init(void)
{
return platform_driver_register(&keypad_driver);
}
static void __exit keypad_exit(void)
{
platform_driver_unregister(&keypad_driver);
}
module_init(keypad_init);
module_exit(keypad_exit);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Keypad Driver");
MODULE_ALIAS("platform: tnetv107x-keypad");
MODULE_LICENSE("GPL");
......@@ -362,6 +362,15 @@ config TOUCHSCREEN_MIGOR
To compile this driver as a module, choose M here: the
module will be called migor_ts.
config TOUCHSCREEN_TNETV107X
tristate "TI TNETV107X touchscreen support"
depends on ARCH_DAVINCI_TNETV107X
help
Say Y here if you want to use the TNETV107X touchscreen.
To compile this driver as a module, choose M here: the
module will be called tnetv107x-ts.
config TOUCHSCREEN_TOUCHRIGHT
tristate "Touchright serial touchscreen"
select SERIO
......
......@@ -40,6 +40,7 @@ obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
......
/*
* Texas Instruments TNETV107X Touchscreen Driver
*
* Copyright (C) 2010 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <mach/tnetv107x.h>
#define TSC_PENUP_POLL (HZ / 5)
#define IDLE_TIMEOUT 100 /* msec */
/*
* The first and last samples of a touch interval are usually garbage and need
* to be filtered out with these devices. The following definitions control
* the number of samples skipped.
*/
#define TSC_HEAD_SKIP 1
#define TSC_TAIL_SKIP 1
#define TSC_SKIP (TSC_HEAD_SKIP + TSC_TAIL_SKIP + 1)
#define TSC_SAMPLES (TSC_SKIP + 1)
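/* with one head and one tail sample dropped, TSC_SKIP (3) samples must arrive before a point is reported, and TSC_SAMPLES (4) ring slots are enough to hold them */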
/* Register Offsets */
struct tsc_regs {
u32 rev;
u32 tscm;
u32 bwcm;
u32 swc;
u32 adcchnl;
u32 adcdata;
u32 chval[4];
};
/* TSC Mode Configuration Register (tscm) bits */
#define WMODE BIT(0)
#define TSKIND BIT(1)
#define ZMEASURE_EN BIT(2)
#define IDLE BIT(3)
#define TSC_EN BIT(4)
#define STOP BIT(5)
#define ONE_SHOT BIT(6)
#define SINGLE BIT(7)
#define AVG BIT(8)
#define AVGNUM(x) (((x) & 0x03) << 9)
#define PVSTC(x) (((x) & 0x07) << 11)
#define PON BIT(14)
#define PONBG BIT(15)
#define AFERST BIT(16)
/* ADC DATA Capture Register bits */
#define DATA_VALID BIT(16)
/* Register Access Macros */
#define tsc_read(ts, reg) __raw_readl(&(ts)->regs->reg)
#define tsc_write(ts, reg, val) __raw_writel(val, &(ts)->regs->reg)
#define tsc_set_bits(ts, reg, val) \
tsc_write(ts, reg, tsc_read(ts, reg) | (val))
#define tsc_clr_bits(ts, reg, val) \
tsc_write(ts, reg, tsc_read(ts, reg) & ~(val))
struct sample {
int x, y, p;
};
struct tsc_data {
struct input_dev *input_dev;
struct resource *res;
struct tsc_regs __iomem *regs;
struct timer_list timer;
spinlock_t lock;
struct clk *clk;
struct device *dev;
int sample_count;
struct sample samples[TSC_SAMPLES];
int tsc_irq;
};
static int tsc_read_sample(struct tsc_data *ts, struct sample* sample)
{
int x, y, z1, z2, t, p = 0;
u32 val;
val = tsc_read(ts, chval[0]);
if (val & DATA_VALID)
x = val & 0xffff;
else
return -EINVAL;
y = tsc_read(ts, chval[1]) & 0xffff;
z1 = tsc_read(ts, chval[2]) & 0xffff;
z2 = tsc_read(ts, chval[3]) & 0xffff;
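/* estimate touch pressure from the cross-plate readings; 600 is the assumed X-plate resistance and the shift rescales the 12-bit ADC value */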
if (z1) {
t = ((600 * x) * (z2 - z1));
p = t / (u32) (z1 << 12);
if (p < 0)
p = 0;
}
sample->x = x;
sample->y = y;
sample->p = p;
return 0;
}
static void tsc_poll(unsigned long data)
{
struct tsc_data *ts = (struct tsc_data *)data;
unsigned long flags;
int i, val, x, y, p;
spin_lock_irqsave(&ts->lock, flags);
if (ts->sample_count >= TSC_SKIP) {
input_report_abs(ts->input_dev, ABS_PRESSURE, 0);
input_report_key(ts->input_dev, BTN_TOUCH, 0);
input_sync(ts->input_dev);
} else if (ts->sample_count > 0) {
/*
* A touch event lasted less than our skip count. Salvage and
* report anyway.
*/
for (i = 0, val = 0; i < ts->sample_count; i++)
val += ts->samples[i].x;
x = val / ts->sample_count;
for (i = 0, val = 0; i < ts->sample_count; i++)
val += ts->samples[i].y;
y = val / ts->sample_count;
for (i = 0, val = 0; i < ts->sample_count; i++)
val += ts->samples[i].p;
p = val / ts->sample_count;
input_report_abs(ts->input_dev, ABS_X, x);
input_report_abs(ts->input_dev, ABS_Y, y);
input_report_abs(ts->input_dev, ABS_PRESSURE, p);
input_report_key(ts->input_dev, BTN_TOUCH, 1);
input_sync(ts->input_dev);
}
ts->sample_count = 0;
spin_unlock_irqrestore(&ts->lock, flags);
}
static irqreturn_t tsc_irq(int irq, void *dev_id)
{
struct tsc_data *ts = (struct tsc_data *)dev_id;
struct sample *sample;
int index;
spin_lock(&ts->lock);
index = ts->sample_count % TSC_SAMPLES;
sample = &ts->samples[index];
if (tsc_read_sample(ts, sample) < 0)
goto out;
if (++ts->sample_count >= TSC_SKIP) {
index = (ts->sample_count - TSC_TAIL_SKIP - 1) % TSC_SAMPLES;
sample = &ts->samples[index];
input_report_abs(ts->input_dev, ABS_X, sample->x);
input_report_abs(ts->input_dev, ABS_Y, sample->y);
input_report_abs(ts->input_dev, ABS_PRESSURE, sample->p);
if (ts->sample_count == TSC_SKIP)
input_report_key(ts->input_dev, BTN_TOUCH, 1);
input_sync(ts->input_dev);
}
mod_timer(&ts->timer, jiffies + TSC_PENUP_POLL);
out:
spin_unlock(&ts->lock);
return IRQ_HANDLED;
}
static int tsc_start(struct input_dev *dev)
{
struct tsc_data *ts = input_get_drvdata(dev);
unsigned long timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT);
u32 val;
clk_enable(ts->clk);
/* Go to idle mode, before any initialization */
while (time_after(timeout, jiffies)) {
if (tsc_read(ts, tscm) & IDLE)
break;
}
if (time_before(timeout, jiffies)) {
dev_warn(ts->dev, "timeout waiting for idle\n");
clk_disable(ts->clk);
return -EIO;
}
/* Configure TSC control register */
val = (PONBG | PON | PVSTC(4) | ONE_SHOT | ZMEASURE_EN);
tsc_write(ts, tscm, val);
/* Bring TSC out of reset: Clear AFE reset bit */
val &= ~(AFERST);
tsc_write(ts, tscm, val);
/* Configure all pins for hardware control */
tsc_write(ts, bwcm, 0);
/* Finally enable the TSC */
tsc_set_bits(ts, tscm, TSC_EN);
return 0;
}
static void tsc_stop(struct input_dev *dev)
{
struct tsc_data *ts = input_get_drvdata(dev);
tsc_clr_bits(ts, tscm, TSC_EN);
synchronize_irq(ts->tsc_irq);
del_timer_sync(&ts->timer);
clk_disable(ts->clk);
}
static int __devinit tsc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tsc_data *ts;
int error = 0;
u32 rev = 0;
ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
if (!ts) {
dev_err(dev, "cannot allocate device info\n");
return -ENOMEM;
}
ts->dev = dev;
spin_lock_init(&ts->lock);
setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
platform_set_drvdata(pdev, ts);
ts->tsc_irq = platform_get_irq(pdev, 0);
if (ts->tsc_irq < 0) {
dev_err(dev, "cannot determine device interrupt\n");
error = -ENODEV;
goto error_res;
}
ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ts->res) {
dev_err(dev, "cannot determine register area\n");
error = -ENODEV;
goto error_res;
}
if (!request_mem_region(ts->res->start, resource_size(ts->res),
pdev->name)) {
dev_err(dev, "cannot claim register memory\n");
ts->res = NULL;
error = -EINVAL;
goto error_res;
}
ts->regs = ioremap(ts->res->start, resource_size(ts->res));
if (!ts->regs) {
dev_err(dev, "cannot map register memory\n");
error = -ENOMEM;
goto error_map;
}
ts->clk = clk_get(dev, NULL);
if (IS_ERR(ts->clk)) {
dev_err(dev, "cannot claim device clock\n");
error = PTR_ERR(ts->clk);
goto error_clk;
}
error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
dev_name(dev), ts);
if (error < 0) {
dev_err(ts->dev, "Could not allocate ts irq\n");
goto error_irq;
}
ts->input_dev = input_allocate_device();
if (!ts->input_dev) {
dev_err(dev, "cannot allocate input device\n");
error = -ENOMEM;
goto error_input;
}
input_set_drvdata(ts->input_dev, ts);
ts->input_dev->name = pdev->name;
ts->input_dev->id.bustype = BUS_HOST;
ts->input_dev->dev.parent = &pdev->dev;
ts->input_dev->open = tsc_start;
ts->input_dev->close = tsc_stop;
clk_enable(ts->clk);
rev = tsc_read(ts, rev);
ts->input_dev->id.product = ((rev >> 8) & 0x07);
ts->input_dev->id.version = ((rev >> 16) & 0xfff);
clk_disable(ts->clk);
__set_bit(EV_KEY, ts->input_dev->evbit);
__set_bit(EV_ABS, ts->input_dev->evbit);
__set_bit(BTN_TOUCH, ts->input_dev->keybit);
input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);
error = input_register_device(ts->input_dev);
if (error < 0) {
dev_err(dev, "failed input device registration\n");
goto error_reg;
}
return 0;
error_reg:
input_free_device(ts->input_dev);
error_input:
free_irq(ts->tsc_irq, ts);
error_irq:
clk_put(ts->clk);
error_clk:
iounmap(ts->regs);
error_map:
release_mem_region(ts->res->start, resource_size(ts->res));
error_res:
platform_set_drvdata(pdev, NULL);
kfree(ts);
return error;
}
static int __devexit tsc_remove(struct platform_device *pdev)
{
struct tsc_data *ts = platform_get_drvdata(pdev);
input_unregister_device(ts->input_dev);
free_irq(ts->tsc_irq, ts);
clk_put(ts->clk);
iounmap(ts->regs);
release_mem_region(ts->res->start, resource_size(ts->res));
platform_set_drvdata(pdev, NULL);
kfree(ts);
return 0;
}
static struct platform_driver tsc_driver = {
.probe = tsc_probe,
.remove = __devexit_p(tsc_remove),
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
static int __init tsc_init(void)
{
return platform_driver_register(&tsc_driver);
}
static void __exit tsc_exit(void)
{
platform_driver_unregister(&tsc_driver);
}
module_init(tsc_init);
module_exit(tsc_exit);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
MODULE_ALIAS("platform: tnetv107x-ts");
MODULE_LICENSE("GPL");
......@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <mach/nand.h>
#include <mach/aemif.h>
#include <asm/mach-types.h>
......@@ -74,6 +75,8 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
struct davinci_aemif_timing *timing;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
......@@ -478,36 +481,6 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
{
uint32_t regval, a1cr;
/*
* NAND FLASH timings @ PLL1 == 459 MHz
* - AEMIF.CLK freq = PLL1/6 = 459/6 = 76.5 MHz
* - AEMIF.CLK period = 1/76.5 MHz = 13.1 ns
*/
regval = 0
| (0 << 31) /* selectStrobe */
| (0 << 30) /* extWait (never with NAND) */
| (1 << 26) /* writeSetup 10 ns */
| (3 << 20) /* writeStrobe 40 ns */
| (1 << 17) /* writeHold 10 ns */
| (0 << 13) /* readSetup 10 ns */
| (3 << 7) /* readStrobe 60 ns */
| (0 << 4) /* readHold 10 ns */
| (3 << 2) /* turnAround ?? ns */
| (0 << 0) /* asyncSize 8-bit bus */
;
a1cr = davinci_nand_readl(info, A1CR_OFFSET);
if (a1cr != regval) {
dev_dbg(info->dev, "Warning: NAND config: Set A1CR " \
"reg to 0x%08x, was 0x%08x, should be done by " \
"bootloader.\n", regval, a1cr);
davinci_nand_writel(info, A1CR_OFFSET, regval);
}
}
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
......@@ -611,6 +584,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.options = pdata->options;
info->chip.bbt_td = pdata->bbt_td;
info->chip.bbt_md = pdata->bbt_md;
info->timing = pdata->timing;
info->ioaddr = (uint32_t __force) vaddr;
......@@ -688,15 +662,25 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_clk_enable;
}
/* EMIF timings should normally be set by the boot loader,
* especially after boot-from-NAND. The *only* reason to
* have this special casing for the DM6446 EVM is to work
* with boot-from-NOR ... with CS0 manually re-jumpered
* (after startup) so it addresses the NAND flash, not NOR.
* Even for dev boards, that's unusually rude...
/*
* Set up the async configuration register in case we did not boot from
* NAND and the bootloader therefore did not configure it.
*/
if (machine_is_davinci_evm())
nand_dm6446evm_flash_init(info);
val = davinci_nand_readl(info, A1CR_OFFSET + info->core_chipsel * 4);
/* Extended Wait is not valid and Select Strobe mode is not used */
val &= ~(ACR_ASIZE_MASK | ACR_EW_MASK | ACR_SS_MASK);
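/* asyncSize (ACR bit 0): 0 selects an 8-bit bus, 1 a 16-bit bus */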
if (info->chip.options & NAND_BUSWIDTH_16)
val |= 0x1;
davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
ret = davinci_aemif_setup_timing(info->timing, info->base,
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup failed\n");
goto err_timing;
}
spin_lock_irq(&davinci_nand_lock);
......@@ -809,6 +793,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
return 0;
err_scan:
err_timing:
clk_disable(info->clk);
err_clk_enable:
......
......@@ -954,6 +954,8 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
select TI_DAVINCI_MDIO
select TI_DAVINCI_CPDMA
select PHYLIB
help
This driver supports TI's DaVinci Ethernet controller.
......@@ -961,6 +963,25 @@ config TI_DAVINCI_EMAC
To compile this driver as a module, choose M here: the module
will be called davinci_emac. This is recommended.
config TI_DAVINCI_MDIO
tristate "TI DaVinci MDIO Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
select PHYLIB
help
This driver supports TI's DaVinci MDIO module.
To compile this driver as a module, choose M here: the module
will be called davinci_mdio. This is recommended.
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
help
This driver supports TI's DaVinci CPDMA DMA engine.
To compile this driver as a module, choose M here: the module
will be called davinci_cpdma. This is recommended.
config DM9000
tristate "DM9000 support"
depends on ARM || BLACKFIN || MIPS
......
......@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
......
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include "davinci_cpdma.h"
/* DMA Registers */
#define CPDMA_TXIDVER 0x00
#define CPDMA_TXCONTROL 0x04
#define CPDMA_TXTEARDOWN 0x08
#define CPDMA_RXIDVER 0x10
#define CPDMA_RXCONTROL 0x14
#define CPDMA_SOFTRESET 0x1c
#define CPDMA_RXTEARDOWN 0x18
#define CPDMA_TXINTSTATRAW 0x80
#define CPDMA_TXINTSTATMASKED 0x84
#define CPDMA_TXINTMASKSET 0x88
#define CPDMA_TXINTMASKCLEAR 0x8c
#define CPDMA_MACINVECTOR 0x90
#define CPDMA_MACEOIVECTOR 0x94
#define CPDMA_RXINTSTATRAW 0xa0
#define CPDMA_RXINTSTATMASKED 0xa4
#define CPDMA_RXINTMASKSET 0xa8
#define CPDMA_RXINTMASKCLEAR 0xac
#define CPDMA_DMAINTSTATRAW 0xb0
#define CPDMA_DMAINTSTATMASKED 0xb4
#define CPDMA_DMAINTMASKSET 0xb8
#define CPDMA_DMAINTMASKCLEAR 0xbc
#define CPDMA_DMAINT_HOSTERR BIT(1)
/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL 0x20
#define CPDMA_DMASTATUS 0x24
#define CPDMA_RXBUFFOFS 0x28
#define CPDMA_EM_CONTROL 0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
struct cpdma_desc {
/* hardware fields */
u32 hw_next;
u32 hw_buffer;
u32 hw_len;
u32 hw_mode;
/* software fields */
void *sw_token;
u32 sw_buffer;
u32 sw_len;
};
struct cpdma_desc_pool {
u32 phys;
void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
int num_desc, used_desc;
unsigned long *bitmap;
struct device *dev;
spinlock_t lock;
};
enum cpdma_state {
CPDMA_STATE_IDLE,
CPDMA_STATE_ACTIVE,
CPDMA_STATE_TEARDOWN,
};
static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
struct cpdma_ctlr {
enum cpdma_state state;
struct cpdma_params params;
struct device *dev;
struct cpdma_desc_pool *pool;
spinlock_t lock;
struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
};
struct cpdma_chan {
enum cpdma_state state;
struct cpdma_ctlr *ctlr;
int chan_num;
spinlock_t lock;
struct cpdma_desc __iomem *head, *tail;
int count;
void __iomem *hdp, *cp, *rxfree;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
struct cpdma_chan_stats stats;
/* offsets into dmaregs */
int int_set, int_clear, td;
};
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs params.dmaregs
#define num_chan params.num_chan
/* various accessors */
#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
* devices (e.g. cpsw switches) use plain old memory. Descriptor pools
* abstract out these details
*/
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
{
int bitmap_size;
struct cpdma_desc_pool *pool;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return NULL;
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->mem_size = size;
pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
pool->num_desc = size / pool->desc_size;
bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long);
pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!pool->bitmap)
goto fail;
if (phys) {
pool->phys = phys;
pool->iomap = ioremap(phys, size);
} else {
pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
GFP_KERNEL);
pool->iomap = (void __force __iomem *)pool->cpumap;
}
if (pool->iomap)
return pool;
fail:
kfree(pool->bitmap);
kfree(pool);
return NULL;
}
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
unsigned long flags;
if (!pool)
return;
spin_lock_irqsave(&pool->lock, flags);
WARN_ON(pool->used_desc);
kfree(pool->bitmap);
if (pool->cpumap) {
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
} else {
iounmap(pool->iomap);
}
spin_unlock_irqrestore(&pool->lock, flags);
kfree(pool);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc)
{
if (!desc)
return 0;
return pool->phys + (__force dma_addr_t)desc -
(__force dma_addr_t)pool->iomap;
}
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
return dma ? pool->iomap + dma - pool->phys : NULL;
}
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
{
unsigned long flags;
int index;
struct cpdma_desc __iomem *desc = NULL;
spin_lock_irqsave(&pool->lock, flags);
index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
num_desc, 0);
if (index < pool->num_desc) {
bitmap_set(pool->bitmap, index, num_desc);
desc = pool->iomap + pool->desc_size * index;
pool->used_desc++;
}
spin_unlock_irqrestore(&pool->lock, flags);
return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
unsigned long flags, index;
index = ((unsigned long)desc - (unsigned long)pool->iomap) /
pool->desc_size;
spin_lock_irqsave(&pool->lock, flags);
bitmap_clear(pool->bitmap, index, num_desc);
pool->used_desc--;
spin_unlock_irqrestore(&pool->lock, flags);
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
struct cpdma_ctlr *ctlr;
ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
if (!ctlr)
return NULL;
ctlr->state = CPDMA_STATE_IDLE;
ctlr->params = *params;
ctlr->dev = params->dev;
spin_lock_init(&ctlr->lock);
ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
ctlr->params.desc_mem_phys,
ctlr->params.desc_mem_size,
ctlr->params.desc_align);
if (!ctlr->pool) {
kfree(ctlr);
return NULL;
}
if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
ctlr->num_chan = CPDMA_MAX_CHANNELS;
return ctlr;
}
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
unsigned long flags;
int i;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EBUSY;
}
if (ctlr->params.has_soft_reset) {
unsigned long timeout = jiffies + HZ/10;
dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
while (time_before(jiffies, timeout)) {
if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
break;
}
WARN_ON(!time_before(jiffies, timeout));
}
for (i = 0; i < ctlr->num_chan; i++) {
__raw_writel(0, ctlr->params.txhdp + 4 * i);
__raw_writel(0, ctlr->params.rxhdp + 4 * i);
__raw_writel(0, ctlr->params.txcp + 4 * i);
__raw_writel(0, ctlr->params.rxcp + 4 * i);
}
dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
ctlr->state = CPDMA_STATE_ACTIVE;
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_start(ctlr->channels[i]);
}
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
unsigned long flags;
int i;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
ctlr->state = CPDMA_STATE_TEARDOWN;
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_stop(ctlr->channels[i]);
}
dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
ctlr->state = CPDMA_STATE_IDLE;
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
{
struct device *dev = ctlr->dev;
unsigned long flags;
int i;
spin_lock_irqsave(&ctlr->lock, flags);
dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
dev_info(dev, "CPDMA: txidver: %x",
dma_reg_read(ctlr, CPDMA_TXIDVER));
dev_info(dev, "CPDMA: txcontrol: %x",
dma_reg_read(ctlr, CPDMA_TXCONTROL));
dev_info(dev, "CPDMA: txteardown: %x",
dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
dev_info(dev, "CPDMA: rxidver: %x",
dma_reg_read(ctlr, CPDMA_RXIDVER));
dev_info(dev, "CPDMA: rxcontrol: %x",
dma_reg_read(ctlr, CPDMA_RXCONTROL));
dev_info(dev, "CPDMA: softreset: %x",
dma_reg_read(ctlr, CPDMA_SOFTRESET));
dev_info(dev, "CPDMA: rxteardown: %x",
dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
dev_info(dev, "CPDMA: txintstatraw: %x",
dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
dev_info(dev, "CPDMA: txintstatmasked: %x",
dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
dev_info(dev, "CPDMA: txintmaskset: %x",
dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
dev_info(dev, "CPDMA: txintmaskclear: %x",
dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
dev_info(dev, "CPDMA: macinvector: %x",
dma_reg_read(ctlr, CPDMA_MACINVECTOR));
dev_info(dev, "CPDMA: maceoivector: %x",
dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
dev_info(dev, "CPDMA: rxintstatraw: %x",
dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
dev_info(dev, "CPDMA: rxintstatmasked: %x",
dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
dev_info(dev, "CPDMA: rxintmaskset: %x",
dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
dev_info(dev, "CPDMA: rxintmaskclear: %x",
dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
dev_info(dev, "CPDMA: dmaintstatraw: %x",
dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
dev_info(dev, "CPDMA: dmaintstatmasked: %x",
dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
dev_info(dev, "CPDMA: dmaintmaskset: %x",
dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
dev_info(dev, "CPDMA: dmaintmaskclear: %x",
dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
if (ctlr->params.has_ext_regs) {
dev_info(dev, "CPDMA: dmacontrol: %x",
dma_reg_read(ctlr, CPDMA_DMACONTROL));
dev_info(dev, "CPDMA: dmastatus: %x",
dma_reg_read(ctlr, CPDMA_DMASTATUS));
dev_info(dev, "CPDMA: rxbuffofs: %x",
dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
}
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
if (ctlr->channels[i])
cpdma_chan_dump(ctlr->channels[i]);
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
unsigned long flags;
int ret = 0, i;
if (!ctlr)
return -EINVAL;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_destroy(ctlr->channels[i]);
}
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
kfree(ctlr);
return ret;
}
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
unsigned long flags;
int i, reg;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
if (ctlr->channels[i])
cpdma_chan_int_ctrl(ctlr->channels[i], enable);
}
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
{
dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
{
struct cpdma_chan *chan;
int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
ret = -ENOMEM;
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
goto err_chan_alloc;
spin_lock_irqsave(&ctlr->lock, flags);
ret = -EBUSY;
if (ctlr->channels[chan_num])
goto err_chan_busy;
chan->ctlr = ctlr;
chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num;
chan->handler = handler;
if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset;
chan->cp = ctlr->params.rxcp + offset;
chan->rxfree = ctlr->params.rxfree + offset;
chan->int_set = CPDMA_RXINTMASKSET;
chan->int_clear = CPDMA_RXINTMASKCLEAR;
chan->td = CPDMA_RXTEARDOWN;
chan->dir = DMA_FROM_DEVICE;
} else {
chan->hdp = ctlr->params.txhdp + offset;
chan->cp = ctlr->params.txcp + offset;
chan->int_set = CPDMA_TXINTMASKSET;
chan->int_clear = CPDMA_TXINTMASKCLEAR;
chan->td = CPDMA_TXTEARDOWN;
chan->dir = DMA_TO_DEVICE;
}
chan->mask = BIT(chan_linear(chan));
spin_lock_init(&chan->lock);
ctlr->channels[chan_num] = chan;
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
err_chan_busy:
spin_unlock_irqrestore(&ctlr->lock, flags);
kfree(chan);
err_chan_alloc:
return ERR_PTR(ret);
}
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr;
unsigned long flags;
if (!chan)
return -EINVAL;
ctlr = chan->ctlr;
spin_lock_irqsave(&ctlr->lock, flags);
if (chan->state != CPDMA_STATE_IDLE)
cpdma_chan_stop(chan);
ctlr->channels[chan->chan_num] = NULL;
spin_unlock_irqrestore(&ctlr->lock, flags);
kfree(chan);
return 0;
}
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats)
{
unsigned long flags;
if (!chan)
return -EINVAL;
spin_lock_irqsave(&chan->lock, flags);
memcpy(stats, &chan->stats, sizeof(*stats));
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
int cpdma_chan_dump(struct cpdma_chan *chan)
{
unsigned long flags;
struct device *dev = chan->ctlr->dev;
spin_lock_irqsave(&chan->lock, flags);
dev_info(dev, "channel %d (%s %d) state %s",
chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
chan_linear(chan), cpdma_state_str[chan->state]);
dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
if (chan->rxfree) {
dev_info(dev, "\trxfree: %x\n",
chan_read(chan, rxfree));
}
dev_info(dev, "\tstats head_enqueue: %d\n",
chan->stats.head_enqueue);
dev_info(dev, "\tstats tail_enqueue: %d\n",
chan->stats.tail_enqueue);
dev_info(dev, "\tstats pad_enqueue: %d\n",
chan->stats.pad_enqueue);
dev_info(dev, "\tstats misqueued: %d\n",
chan->stats.misqueued);
dev_info(dev, "\tstats desc_alloc_fail: %d\n",
chan->stats.desc_alloc_fail);
dev_info(dev, "\tstats pad_alloc_fail: %d\n",
chan->stats.pad_alloc_fail);
dev_info(dev, "\tstats runt_receive_buff: %d\n",
chan->stats.runt_receive_buff);
dev_info(dev, "\tstats runt_transmit_buff: %d\n",
chan->stats.runt_transmit_buff);
dev_info(dev, "\tstats empty_dequeue: %d\n",
chan->stats.empty_dequeue);
dev_info(dev, "\tstats busy_dequeue: %d\n",
chan->stats.busy_dequeue);
dev_info(dev, "\tstats good_dequeue: %d\n",
chan->stats.good_dequeue);
dev_info(dev, "\tstats requeue: %d\n",
chan->stats.requeue);
dev_info(dev, "\tstats teardown_dequeue: %d\n",
chan->stats.teardown_dequeue);
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
static void __cpdma_chan_submit(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *prev = chan->tail;
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t desc_dma;
u32 mode;
desc_dma = desc_phys(pool, desc);
/* simple case - idle channel */
if (!chan->head) {
chan->stats.head_enqueue++;
chan->head = desc;
chan->tail = desc;
if (chan->state == CPDMA_STATE_ACTIVE)
chan_write(chan, hdp, desc_dma);
return;
}
/* first chain the descriptor at the tail of the list */
desc_write(prev, hw_next, desc_dma);
chan->tail = desc;
chan->stats.tail_enqueue++;
/* next check if EOQ has been triggered already */
mode = desc_read(prev, hw_mode);
if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
(chan->state == CPDMA_STATE_ACTIVE)) {
desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
chan_write(chan, hdp, desc_dma);
chan->stats.misqueued++;
}
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
int len, gfp_t gfp_mask)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
unsigned long flags;
u32 mode;
int ret = 0;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state == CPDMA_STATE_TEARDOWN) {
ret = -EINVAL;
goto unlock_ret;
}
desc = cpdma_desc_alloc(ctlr->pool, 1);
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
goto unlock_ret;
}
if (len < ctlr->params.min_packet_size) {
len = ctlr->params.min_packet_size;
chan->stats.runt_transmit_buff++;
}
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
desc_write(desc, hw_next, 0);
desc_write(desc, hw_buffer, buffer);
desc_write(desc, hw_len, len);
desc_write(desc, hw_mode, mode | len);
desc_write(desc, sw_token, token);
desc_write(desc, sw_buffer, buffer);
desc_write(desc, sw_len, len);
__cpdma_chan_submit(chan, desc);
if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
chan_write(chan, rxfree, 1);
chan->count++;
unlock_ret:
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
int outlen, int status)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t buff_dma;
int origlen;
void *token;
token = (void *)desc_read(desc, sw_token);
buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);
dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
cpdma_desc_free(pool, desc, 1);
(*chan->handler)(token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
int status, outlen;
struct cpdma_desc_pool *pool = ctlr->pool;
dma_addr_t desc_dma;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
desc = chan->head;
if (!desc) {
chan->stats.empty_dequeue++;
status = -ENOENT;
goto unlock_ret;
}
desc_dma = desc_phys(pool, desc);
status = __raw_readl(&desc->hw_mode);
outlen = status & 0x7ff;
if (status & CPDMA_DESC_OWNER) {
chan->stats.busy_dequeue++;
status = -EBUSY;
goto unlock_ret;
}
status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
chan_write(chan, cp, desc_dma);
chan->count--;
chan->stats.good_dequeue++;
if (status & CPDMA_DESC_EOQ) {
chan->stats.requeue++;
chan_write(chan, hdp, desc_phys(pool, chan->head));
}
spin_unlock_irqrestore(&chan->lock, flags);
__cpdma_chan_free(chan, desc, outlen, status);
return status;
unlock_ret:
spin_unlock_irqrestore(&chan->lock, flags);
return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
int used = 0, ret = 0;
if (chan->state != CPDMA_STATE_ACTIVE)
return -EINVAL;
while (used < quota) {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
used++;
}
return used;
}
int cpdma_chan_start(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_IDLE) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EBUSY;
}
if (ctlr->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
dma_reg_write(ctlr, chan->int_set, chan->mask);
chan->state = CPDMA_STATE_ACTIVE;
if (chan->head) {
chan_write(chan, hdp, desc_phys(pool, chan->head));
if (chan->rxfree)
chan_write(chan, rxfree, chan->count);
}
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
int cpdma_chan_stop(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
unsigned long flags;
int ret;
unsigned long timeout;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
chan->state = CPDMA_STATE_TEARDOWN;
dma_reg_write(ctlr, chan->int_clear, chan->mask);
/* trigger teardown */
dma_reg_write(ctlr, chan->td, chan->chan_num);
/* wait for teardown complete */
timeout = jiffies + HZ/10; /* 100 msec */
while (time_before(jiffies, timeout)) {
u32 cp = chan_read(chan, cp);
if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
break;
cpu_relax();
}
WARN_ON(!time_before(jiffies, timeout));
chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
/* handle completed packets */
do {
ret = __cpdma_chan_process(chan);
if (ret < 0)
break;
} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
/* remaining packets haven't been tx/rx'ed, clean them up */
while (chan->head) {
struct cpdma_desc __iomem *desc = chan->head;
dma_addr_t next_dma;
next_dma = desc_read(desc, hw_next);
chan->head = desc_from_phys(pool, next_dma);
chan->stats.teardown_dequeue++;
/* issue callback without locks held */
spin_unlock_irqrestore(&chan->lock, flags);
__cpdma_chan_free(chan, desc, 0, -ENOSYS);
spin_lock_irqsave(&chan->lock, flags);
}
chan->state = CPDMA_STATE_IDLE;
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
chan->mask);
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
struct cpdma_control_info {
u32 reg;
u32 shift, mask;
int access;
#define ACCESS_RO BIT(0)
#define ACCESS_WO BIT(1)
#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
};
struct cpdma_control_info controls[] = {
[CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
[CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
[CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
[CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
[CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
[CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
[CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
[CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
};
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
unsigned long flags;
struct cpdma_control_info *info = &controls[control];
int ret;
spin_lock_irqsave(&ctlr->lock, flags);
ret = -ENOTSUPP;
if (!ctlr->params.has_ext_regs)
goto unlock_ret;
ret = -EINVAL;
if (ctlr->state != CPDMA_STATE_ACTIVE)
goto unlock_ret;
ret = -ENOENT;
if (control < 0 || control >= ARRAY_SIZE(controls))
goto unlock_ret;
ret = -EPERM;
if ((info->access & ACCESS_RO) != ACCESS_RO)
goto unlock_ret;
ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
unlock_ret:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
unsigned long flags;
struct cpdma_control_info *info = &controls[control];
int ret;
u32 val;
spin_lock_irqsave(&ctlr->lock, flags);
ret = -ENOTSUPP;
if (!ctlr->params.has_ext_regs)
goto unlock_ret;
ret = -EINVAL;
if (ctlr->state != CPDMA_STATE_ACTIVE)
goto unlock_ret;
ret = -ENOENT;
if (control < 0 || control >= ARRAY_SIZE(controls))
goto unlock_ret;
ret = -EPERM;
if ((info->access & ACCESS_WO) != ACCESS_WO)
goto unlock_ret;
val = dma_reg_read(ctlr, info->reg);
val &= ~(info->mask << info->shift);
val |= (value & info->mask) << info->shift;
dma_reg_write(ctlr, info->reg, val);
ret = 0;
unlock_ret:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DAVINCI_CPDMA_H__
#define __DAVINCI_CPDMA_H__
#define CPDMA_MAX_CHANNELS BITS_PER_LONG
#define tx_chan_num(chan) (chan)
#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan) (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
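/*
 * Note (added for clarity): with the mapping above, TX channels occupy
 * linear numbers 0..CPDMA_MAX_CHANNELS-1 and RX channels the range above
 * that; e.g. on a 32-bit build rx_chan_num(0) is 32, and chan_linear()
 * folds it back to hardware channel 0.
 */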
struct cpdma_params {
struct device *dev;
void __iomem *dmaregs;
void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
void __iomem *rxthresh, *rxfree;
int num_chan;
bool has_soft_reset;
int min_packet_size;
u32 desc_mem_phys;
int desc_mem_size;
int desc_align;
/*
* Some instances of embedded cpdma controllers have extra control and
* status registers. The following flag enables access to these
* "extended" registers.
*/
bool has_ext_regs;
};
struct cpdma_chan_stats {
u32 head_enqueue;
u32 tail_enqueue;
u32 pad_enqueue;
u32 misqueued;
u32 desc_alloc_fail;
u32 pad_alloc_fail;
u32 runt_receive_buff;
u32 runt_transmit_buff;
u32 empty_dequeue;
u32 busy_dequeue;
u32 good_dequeue;
u32 requeue;
u32 teardown_dequeue;
};
struct cpdma_ctlr;
struct cpdma_chan;
typedef void (*cpdma_handler_fn)(void *token, int len, int status);
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
int len, gfp_t gfp_mask);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
enum cpdma_control {
CPDMA_CMD_IDLE, /* write-only */
CPDMA_COPY_ERROR_FRAMES, /* read-write */
CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
CPDMA_TX_PRIO_FIXED, /* read-write */
CPDMA_STAT_IDLE, /* read-only */
CPDMA_STAT_TX_ERR_CHAN, /* read-only */
CPDMA_STAT_TX_ERR_CODE, /* read-only */
CPDMA_STAT_RX_ERR_CHAN, /* read-only */
CPDMA_STAT_RX_ERR_CODE, /* read-only */
CPDMA_RX_BUFFER_OFFSET, /* read-write */
};
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
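/*
 * Illustrative usage sketch (not part of the API): roughly how a client
 * driver is expected to wire these calls together.  The handler body,
 * helper names and error handling are simplified assumptions.
 */
#if 0	/* example only */
static void my_tx_done(void *token, int len, int status)
{
	dev_kfree_skb_any(token);	/* token is the skb passed to submit */
}

static int my_dma_setup(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr = cpdma_ctlr_create(params);
	struct cpdma_chan *txch;

	if (!ctlr)
		return -ENOMEM;
	txch = cpdma_chan_create(ctlr, tx_chan_num(0), my_tx_done);
	if (!txch)
		return -ENOMEM;
	return cpdma_ctlr_start(ctlr);
}
#endif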
#endif
......@@ -63,6 +63,8 @@
#include <asm/irq.h>
#include <asm/page.h>
#include "davinci_cpdma.h"
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
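/*
 * Example (illustrative, assuming the module is built as davinci_emac):
 *	modprobe davinci_emac debug_level=0x00ff
 * where the value is a bitmask of NETIF_MSG_* flags from linux/netdevice.h.
 */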
......@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick = 1 ms */
#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
......@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC register related defines */
#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
#define EMAC_NUM_MULTICAST_BITS (64)
#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
......@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
/* EMAC Peripheral Device Register Memory Layout structure */
#define EMAC_TXIDVER 0x0
#define EMAC_TXCONTROL 0x4
#define EMAC_TXTEARDOWN 0x8
#define EMAC_RXIDVER 0x10
#define EMAC_RXCONTROL 0x14
#define EMAC_RXTEARDOWN 0x18
#define EMAC_TXINTSTATRAW 0x80
#define EMAC_TXINTSTATMASKED 0x84
#define EMAC_TXINTMASKSET 0x88
#define EMAC_TXINTMASKCLEAR 0x8C
#define EMAC_MACINVECTOR 0x90
#define EMAC_DM646X_MACEOIVECTOR 0x94
#define EMAC_RXINTSTATRAW 0xA0
#define EMAC_RXINTSTATMASKED 0xA4
#define EMAC_RXINTMASKSET 0xA8
#define EMAC_RXINTMASKCLEAR 0xAC
#define EMAC_MACINTSTATRAW 0xB0
#define EMAC_MACINTSTATMASKED 0xB4
#define EMAC_MACINTMASKSET 0xB8
......@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_MACADDRHI 0x504
#define EMAC_MACINDEX 0x508
/* EMAC HDP and Completion registers */
#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
#define EMAC_TXCP(ch) (0x640 + (ch * 4))
#define EMAC_RXCP(ch) (0x660 + (ch * 4))
/* EMAC statistics registers */
#define EMAC_RXGOODFRAMES 0x200
#define EMAC_RXBCASTFRAMES 0x204
......@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM644X_INTMIN_INTVL 0x1
#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
/* EMAC MDIO related */
/* Mask & Control defines */
#define MDIO_CONTROL_CLKDIV (0xFF)
#define MDIO_CONTROL_ENABLE BIT(30)
#define MDIO_USERACCESS_GO BIT(31)
#define MDIO_USERACCESS_WRITE BIT(30)
#define MDIO_USERACCESS_READ (0)
#define MDIO_USERACCESS_REGADR (0x1F << 21)
#define MDIO_USERACCESS_PHYADR (0x1F << 16)
#define MDIO_USERACCESS_DATA (0xFFFF)
#define MDIO_USERPHYSEL_LINKSEL BIT(7)
#define MDIO_VER_MODID (0xFFFF << 16)
#define MDIO_VER_REVMAJ (0xFF << 8)
#define MDIO_VER_REVMIN (0xFF)
#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
#define MDIO_CONTROL (0x04)
/* EMAC DM646X control module registers */
#define EMAC_DM646X_CMINTCTRL 0x0C
#define EMAC_DM646X_CMRXINTEN 0x14
......@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
/** net_buf_obj: EMAC network buffer data structure
*
* EMAC network buffer data structure
*/
struct emac_netbufobj {
void *buf_token;
char *data_ptr;
int length;
};
/** net_pkt_obj: EMAC network packet data structure
*
* EMAC network packet data structure - supports buffer list (for future)
*/
struct emac_netpktobj {
void *pkt_token; /* data token may hold tx/rx chan id */
struct emac_netbufobj *buf_list; /* array of network buffer objects */
int num_bufs;
int pkt_length;
};
/** emac_tx_bd: EMAC TX Buffer descriptor data structure
*
* EMAC TX Buffer descriptor data structure
*/
struct emac_tx_bd {
int h_next;
int buff_ptr;
int off_b_len;
int mode; /* SOP, EOP, ownership, EOQ, teardown, Qstarv, length */
struct emac_tx_bd __iomem *next;
void *buf_token;
};
/** emac_txch: EMAC TX Channel data structure
*
* EMAC TX Channel data structure
*/
struct emac_txch {
/* Config related */
u32 num_bd;
u32 service_max;
/* CPPI specific */
u32 alloc_size;
void __iomem *bd_mem;
struct emac_tx_bd __iomem *bd_pool_head;
struct emac_tx_bd __iomem *active_queue_head;
struct emac_tx_bd __iomem *active_queue_tail;
struct emac_tx_bd __iomem *last_hw_bdprocessed;
u32 queue_active;
u32 teardown_pending;
u32 *tx_complete;
/** statistics */
u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
u32 mis_queued_packets;
u32 queue_reinit;
u32 end_of_queue_add;
u32 out_of_tx_bd;
u32 no_active_pkts; /* IRQ when there were no packets to process */
u32 active_queue_count;
};
/** emac_rx_bd: EMAC RX Buffer descriptor data structure
*
* EMAC RX Buffer descriptor data structure
*/
struct emac_rx_bd {
int h_next;
int buff_ptr;
int off_b_len;
int mode;
struct emac_rx_bd __iomem *next;
void *data_ptr;
void *buf_token;
};
/** emac_rxch: EMAC RX Channel data structure
*
* EMAC RX Channel data structure
*/
struct emac_rxch {
/* configuration info */
u32 num_bd;
u32 service_max;
u32 buf_size;
char mac_addr[6];
/** CPPI specific */
u32 alloc_size;
void __iomem *bd_mem;
struct emac_rx_bd __iomem *bd_pool_head;
struct emac_rx_bd __iomem *active_queue_head;
struct emac_rx_bd __iomem *active_queue_tail;
u32 queue_active;
u32 teardown_pending;
/* packet and buffer objects */
struct emac_netpktobj pkt_queue;
struct emac_netbufobj buf_queue;
/** statistics */
u32 proc_count; /* number of times emac_rx_bdproc is called */
u32 processed_bd;
u32 recycled_bd;
u32 out_of_rx_bd;
u32 out_of_rx_buffers;
u32 queue_reinit;
u32 end_of_queue_add;
u32 end_of_queue;
u32 mis_queued_packets;
};
/* emac_priv: EMAC private data structure
*
* EMAC adapter private data structure
......@@ -469,17 +317,13 @@ struct emac_priv {
struct platform_device *pdev;
struct napi_struct napi;
char mac_addr[6];
spinlock_t tx_lock;
spinlock_t rx_lock;
void __iomem *remap_addr;
u32 emac_base_phys;
void __iomem *emac_base;
void __iomem *ctrl_base;
void __iomem *emac_ctrl_ram;
u32 ctrl_ram_size;
u32 hw_ram_addr;
struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
struct cpdma_ctlr *dma;
struct cpdma_chan *txchan;
struct cpdma_chan *rxchan;
u32 link; /* 1=link on, 0=link off */
u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
u32 duplex; /* Link duplex: 0=Half, 1=Full */
......@@ -493,13 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
/* periodic timer required for MDIO polling */
struct timer_list periodic_timer;
u32 periodic_ticks;
u32 timer_active;
u32 phy_mask;
/* mii_bus,phy members */
struct mii_bus *mii_bus;
const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
......@@ -510,19 +348,6 @@ struct emac_priv {
/* clock frequency for EMAC */
static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
static unsigned long mdio_max_freq;
#define emac_virt_to_phys(addr, priv) \
(((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
+ priv->hw_ram_addr)
/* Cache macros - Packet buffers would be from skb pool which is cached */
#define EMAC_VIRT_NOCACHE(addr) (addr)
/* DM644x does not have BD's in cached memory - so no cache functions */
#define BD_CACHE_INVALIDATE(addr, size)
#define BD_CACHE_WRITEBACK(addr, size)
#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
......@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
/**
* emac_dump_regs: Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
......@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_ctrl_read(EMAC_CTRL_EWCTL),
emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
}
dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
emac_read(EMAC_TXIDVER),
((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
emac_read(EMAC_RXIDVER),
((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
"TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
"RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
"MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
......@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
"MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
dev_info(emac_dev, "EMAC Statistics\n");
dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
emac_read(EMAC_RXGOODFRAMES));
......@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_read(EMAC_RXMOFOVERRUNS));
dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
emac_read(EMAC_RXDMAOVERRUNS));
cpdma_ctlr_dump(priv->dma);
}
/*************************************************************************
* EMAC MDIO/Phy Functionality
*************************************************************************/
/**
* emac_get_drvinfo: Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
......@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
if (priv->phy_mask)
if (priv->phydev)
return phy_ethtool_gset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
......@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
if (priv->phy_mask)
if (priv->phydev)
return phy_ethtool_sset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
......@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
if (priv->phy_mask)
if (priv->phydev)
new_duplex = priv->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
......@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
/** EMAC on-chip buffer descriptor memory
*
* WARNING: Please note that the on chip memory is used for both TX and RX
* buffer descriptor queues and is equally divided between TX and RX desc's
* If the number of TX or RX descriptors changes, these memory pointers need
* to be adjusted. If external memory is allocated then these pointers can
* point to that memory.
*
*/
#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
(((priv)->ctrl_ram_size) >> 1))
/**
* emac_init_txch: TX channel initialization
* @priv: The DaVinci EMAC private adapter structure
* @ch: TX channel number
*
* Called during device init to set up a TX channel (allocate buffer descriptors,
* create free pool and keep ready for transmission)
*
* Returns success (0) or a memory allocation failure error code
*/
static int emac_init_txch(struct emac_priv *priv, u32 ch)
{
struct device *emac_dev = &priv->ndev->dev;
u32 cnt, bd_size;
void __iomem *mem;
struct emac_tx_bd __iomem *curr_bd;
struct emac_txch *txch = NULL;
txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
if (NULL == txch) {
dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
return -ENOMEM;
}
priv->txch[ch] = txch;
txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
txch->active_queue_head = NULL;
txch->active_queue_tail = NULL;
txch->queue_active = 0;
txch->teardown_pending = 0;
/* allocate memory for TX CPPI channel on a 4 byte boundary */
txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
GFP_KERNEL);
if (NULL == txch->tx_complete) {
dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
kfree(txch);
return -ENOMEM;
}
/* allocate buffer descriptor pool; align every BD on a four word
* boundary for future requirements */
bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
/* alloc TX BD memory */
txch->bd_mem = EMAC_TX_BD_MEM(priv);
__memzero((void __force *)txch->bd_mem, txch->alloc_size);
/* initialize the BD linked list */
mem = (void __force __iomem *)
(((u32 __force) txch->bd_mem + 0xF) & ~0xF);
txch->bd_pool_head = NULL;
for (cnt = 0; cnt < txch->num_bd; cnt++) {
curr_bd = mem + (cnt * bd_size);
curr_bd->next = txch->bd_pool_head;
txch->bd_pool_head = curr_bd;
}
/* reset statistics counters */
txch->out_of_tx_bd = 0;
txch->no_active_pkts = 0;
txch->active_queue_count = 0;
return 0;
}
/**
* emac_cleanup_txch: Book-keep function to clean TX channel resources
* @priv: The DaVinci EMAC private adapter structure
* @ch: TX channel number
*
* Called to clean up TX channel resources
*
*/
static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
{
struct emac_txch *txch = priv->txch[ch];
if (txch) {
if (txch->bd_mem)
txch->bd_mem = NULL;
kfree(txch->tx_complete);
kfree(txch);
priv->txch[ch] = NULL;
}
struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
if (WARN_ON(!skb))
return NULL;
skb->dev = priv->ndev;
skb_reserve(skb, NET_IP_ALIGN);
return skb;
}
/**
* emac_net_tx_complete: TX packet completion function
* @priv: The DaVinci EMAC private adapter structure
* @net_data_tokens: packet token - skb pointer
* @num_tokens: number of skb's to free
* @ch: TX channel number
*
* Frees the skb once packet is transmitted
*
*/
static int emac_net_tx_complete(struct emac_priv *priv,
void **net_data_tokens,
int num_tokens, u32 ch)
static void emac_rx_handler(void *token, int len, int status)
{
struct net_device *ndev = priv->ndev;
u32 cnt;
if (unlikely(num_tokens && netif_queue_stopped(ndev)))
netif_start_queue(ndev);
for (cnt = 0; cnt < num_tokens; cnt++) {
struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
if (skb == NULL)
continue;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
int ret;
/* free and bail if we are shutting down */
if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
return;
}
return 0;
}
/**
* emac_txch_teardown: TX channel teardown
* @priv: The DaVinci EMAC private adapter structure
* @ch: TX channel number
*
* Called to teardown TX channel
*
*/
static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
{
struct device *emac_dev = &priv->ndev->dev;
u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
struct emac_txch *txch = priv->txch[ch];
struct emac_tx_bd __iomem *curr_bd;
while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
EMAC_TEARDOWN_VALUE) {
/* wait till tx teardown complete */
cpu_relax(); /* TODO: check if this helps ... */
--teardown_cnt;
if (0 == teardown_cnt) {
dev_err(emac_dev, "EMAC: TX teardown aborted\n");
break;
}
}
emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
/* process sent packets and return skb's to upper layer */
if (1 == txch->queue_active) {
curr_bd = txch->active_queue_head;
while (curr_bd != NULL) {
dma_unmap_single(emac_dev, curr_bd->buff_ptr,
curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
DMA_TO_DEVICE);
emac_net_tx_complete(priv, (void __force *)
&curr_bd->buf_token, 1, ch);
if (curr_bd != txch->active_queue_tail)
curr_bd = curr_bd->next;
else
break;
}
txch->bd_pool_head = txch->active_queue_head;
txch->active_queue_head =
txch->active_queue_tail = NULL;
}
}
/**
* emac_stop_txch: Stop TX channel operation
* @priv: The DaVinci EMAC private adapter structure
* @ch: TX channel number
*
* Called to stop TX channel operation
*
*/
static void emac_stop_txch(struct emac_priv *priv, u32 ch)
{
struct emac_txch *txch = priv->txch[ch];
if (txch) {
txch->teardown_pending = 1;
emac_write(EMAC_TXTEARDOWN, 0);
emac_txch_teardown(priv, ch);
txch->teardown_pending = 0;
emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
/* recycle on receive error */
if (status < 0) {
ndev->stats.rx_errors++;
goto recycle;
}
}
/**
* emac_tx_bdproc: TX buffer descriptor (packet) processing
* @priv: The DaVinci EMAC private adapter structure
* @ch: TX channel number to process buffer descriptors for
* @budget: number of packets allowed to process
* @pending: indication to caller that packets are pending to process
*
* Processes TX buffer descriptors after packets are transmitted - checks
* ownership bit on the TX descriptor and requeues it to free pool & frees
* the SKB buffer. Only "budget" number of packets are processed and
* indication of pending packets provided to the caller
*
* Returns number of packets processed
*/
static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
{
struct device *emac_dev = &priv->ndev->dev;
unsigned long flags;
u32 frame_status;
u32 pkts_processed = 0;
u32 tx_complete_cnt = 0;
struct emac_tx_bd __iomem *curr_bd;
struct emac_txch *txch = priv->txch[ch];
u32 *tx_complete_ptr = txch->tx_complete;
if (unlikely(1 == txch->teardown_pending)) {
if (netif_msg_tx_err(priv) && net_ratelimit()) {
dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
"teardown pending\n");
}
return 0; /* don't handle any pkt completions */
}
/* feed received packet up the stack */
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
++txch->proc_count;
spin_lock_irqsave(&priv->tx_lock, flags);
curr_bd = txch->active_queue_head;
if (NULL == curr_bd) {
emac_write(EMAC_TXCP(ch),
emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
txch->no_active_pkts++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
return 0;
/* alloc a new packet for receive */
skb = emac_rx_alloc(priv);
if (!skb) {
if (netif_msg_rx_err(priv) && net_ratelimit())
dev_err(emac_dev, "failed rx buffer alloc\n");
return;
}
BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
frame_status = curr_bd->mode;
while ((curr_bd) &&
((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
(pkts_processed < budget)) {
emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
txch->active_queue_head = curr_bd->next;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
if (curr_bd->next) { /* misqueued packet */
emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
++txch->mis_queued_packets;
} else {
txch->queue_active = 0; /* end of queue */
}
}
dma_unmap_single(emac_dev, curr_bd->buff_ptr,
curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
DMA_TO_DEVICE);
*tx_complete_ptr = (u32) curr_bd->buf_token;
++tx_complete_ptr;
++tx_complete_cnt;
curr_bd->next = txch->bd_pool_head;
txch->bd_pool_head = curr_bd;
--txch->active_queue_count;
pkts_processed++;
txch->last_hw_bdprocessed = curr_bd;
curr_bd = txch->active_queue_head;
if (curr_bd) {
BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
frame_status = curr_bd->mode;
}
} /* end of pkt processing loop */
emac_net_tx_complete(priv,
(void *)&txch->tx_complete[0],
tx_complete_cnt, ch);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return pkts_processed;
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
skb_tailroom(skb), GFP_KERNEL);
if (WARN_ON(ret < 0))
dev_kfree_skb_any(skb);
}
#define EMAC_ERR_TX_OUT_OF_BD -1
/**
* emac_send: EMAC Transmit function (internal)
* @priv: The DaVinci EMAC private adapter structure
* @pkt: packet pointer (contains skb ptr)
* @ch: TX channel number
*
* Called by the transmit function to queue the packet in EMAC hardware queue
*
* Returns success(0) or error code (typically out of desc's)
*/
static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
static void emac_tx_handler(void *token, int len, int status)
{
unsigned long flags;
struct emac_tx_bd __iomem *curr_bd;
struct emac_txch *txch;
struct emac_netbufobj *buf_list;
txch = priv->txch[ch];
buf_list = pkt->buf_list; /* get handle to the buffer array */
/* check packet size and pad if short */
if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
}
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
spin_lock_irqsave(&priv->tx_lock, flags);
curr_bd = txch->bd_pool_head;
if (curr_bd == NULL) {
txch->out_of_tx_bd++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
return EMAC_ERR_TX_OUT_OF_BD;
}
txch->bd_pool_head = curr_bd->next;
curr_bd->buf_token = buf_list->buf_token;
curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
buf_list->length, DMA_TO_DEVICE);
curr_bd->off_b_len = buf_list->length;
curr_bd->h_next = 0;
curr_bd->next = NULL;
curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
EMAC_CPPI_EOP_BIT | pkt->pkt_length);
/* flush the packet from cache if write back cache is present */
BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
/* send the packet */
if (txch->active_queue_head == NULL) {
txch->active_queue_head = curr_bd;
txch->active_queue_tail = curr_bd;
if (1 != txch->queue_active) {
emac_write(EMAC_TXHDP(ch),
emac_virt_to_phys(curr_bd, priv));
txch->queue_active = 1;
}
++txch->queue_reinit;
} else {
register struct emac_tx_bd __iomem *tail_bd;
register u32 frame_status;
tail_bd = txch->active_queue_tail;
tail_bd->next = curr_bd;
txch->active_queue_tail = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
emac_write(EMAC_TXHDP(ch),
emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++txch->end_of_queue_add;
}
}
txch->active_queue_count++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
return 0;
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
}
/**
......@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
struct emac_netpktobj tx_packet; /* packet object */
struct emac_priv *priv = netdev_priv(ndev);
/* If no link, return */
if (unlikely(!priv->link)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
return NETDEV_TX_BUSY;
goto fail_tx;
}
ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
if (unlikely(ret_code < 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
goto fail_tx;
}
/* Build the buffer and packet objects - Since only single fragment is
* supported, need not set length and token in both packet & object.
* Doing so for completeness sake & to show that this needs to be done
* in multifragment case
*/
tx_packet.buf_list = &tx_buf;
tx_packet.num_bufs = 1; /* only single fragment supported */
tx_packet.pkt_length = skb->len;
tx_packet.pkt_token = (void *)skb;
tx_buf.length = skb->len;
tx_buf.buf_token = (void *)skb;
tx_buf.data_ptr = skb->data;
ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
GFP_KERNEL);
if (unlikely(ret_code != 0)) {
if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
" err. Out of TX BD's");
netif_stop_queue(priv->ndev);
}
ndev->stats.tx_dropped++;
return NETDEV_TX_BUSY;
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
goto fail_tx;
}
return NETDEV_TX_OK;
fail_tx:
ndev->stats.tx_dropped++;
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
/**
......@@ -1621,217 +1117,15 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
emac_dump_regs(priv);
ndev->stats.tx_errors++;
emac_int_disable(priv);
emac_stop_txch(priv, EMAC_DEF_TX_CH);
emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
emac_init_txch(priv, EMAC_DEF_TX_CH);
emac_write(EMAC_TXHDP(0), 0);
emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
cpdma_chan_stop(priv->txchan);
cpdma_chan_start(priv->txchan);
emac_int_enable(priv);
}
/**
* emac_net_alloc_rx_buf: Allocate a skb for RX
* @priv: The DaVinci EMAC private adapter structure
* @buf_size: size of SKB data buffer to allocate
* @data_token: data token returned (skb handle for storing in buffer desc)
* @ch: RX channel number
*
* Called during RX channel setup - allocates skb buffer of required size
* and provides the skb handle and allocated buffer data pointer to caller
*
* Returns skb data pointer or 0 on failure to alloc skb
*/
static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
void **data_token, u32 ch)
{
struct net_device *ndev = priv->ndev;
struct device *emac_dev = &ndev->dev;
struct sk_buff *p_skb;
p_skb = dev_alloc_skb(buf_size);
if (unlikely(NULL == p_skb)) {
if (netif_msg_rx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
return NULL;
}
/* set device pointer in skb and reserve space for extra bytes */
p_skb->dev = ndev;
skb_reserve(p_skb, NET_IP_ALIGN);
*data_token = (void *) p_skb;
return p_skb->data;
}
/**
* emac_init_rxch: RX channel initialization
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @param: mac address for RX channel
*
* Called during device init to set up an RX channel (allocate buffers and
* buffer descriptors, create queue and keep ready for reception)
*
* Returns success (0) or a memory allocation failure error code
*/
static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
{
struct device *emac_dev = &priv->ndev->dev;
u32 cnt, bd_size;
void __iomem *mem;
struct emac_rx_bd __iomem *curr_bd;
struct emac_rxch *rxch = NULL;
rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
if (NULL == rxch) {
dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
return -ENOMEM;
}
priv->rxch[ch] = rxch;
rxch->buf_size = priv->rx_buf_size;
rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
rxch->queue_active = 0;
rxch->teardown_pending = 0;
/* save mac address */
for (cnt = 0; cnt < 6; cnt++)
rxch->mac_addr[cnt] = param[cnt];
/* allocate buffer descriptor pool; align every BD on a four word
* boundary for future requirements */
bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
rxch->bd_mem = EMAC_RX_BD_MEM(priv);
__memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
rxch->pkt_queue.buf_list = &rxch->buf_queue;
/* allocate RX buffer and initialize the BD linked list */
mem = (void __force __iomem *)
(((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
rxch->active_queue_head = NULL;
rxch->active_queue_tail = mem;
for (cnt = 0; cnt < rxch->num_bd; cnt++) {
curr_bd = mem + (cnt * bd_size);
/* for future use the last parameter contains the BD ptr */
curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
rxch->buf_size,
(void __force **)&curr_bd->buf_token,
EMAC_DEF_RX_CH);
if (curr_bd->data_ptr == NULL) {
dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
"failed for ch %d\n", ch);
kfree(rxch);
return -ENOMEM;
}
/* populate the hardware descriptor */
curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
priv);
curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
rxch->buf_size, DMA_FROM_DEVICE);
curr_bd->off_b_len = rxch->buf_size;
curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
/* write back to hardware memory */
BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
EMAC_BD_LENGTH_FOR_CACHE);
curr_bd->next = rxch->active_queue_head;
rxch->active_queue_head = curr_bd;
}
/* At this point rxch->active_queue_head points to the first
RX BD ready to be given to RX HDP and rxch->active_queue_tail
points to the last RX BD
*/
return 0;
}
/**
* emac_rxch_teardown: RX channel teardown
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
*
* Called during device stop to teardown RX channel
*
*/
static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
{
struct device *emac_dev = &priv->ndev->dev;
u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
EMAC_TEARDOWN_VALUE) {
/* wait till rx teardown complete */
cpu_relax(); /* TODO: check if this helps ... */
--teardown_cnt;
if (0 == teardown_cnt) {
dev_err(emac_dev, "EMAC: RX teardown aborted\n");
break;
}
}
emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
}
/**
* emac_stop_rxch: Stop RX channel operation
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
*
* Called during device stop to stop RX channel operation
*
*/
static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
{
struct emac_rxch *rxch = priv->rxch[ch];
if (rxch) {
rxch->teardown_pending = 1;
emac_write(EMAC_RXTEARDOWN, ch);
/* wait for teardown complete */
emac_rxch_teardown(priv, ch);
rxch->teardown_pending = 0;
emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
}
}
/**
* emac_cleanup_rxch: Book-keep function to clean RX channel resources
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
*
* Called during device stop to clean up RX channel resources
*
*/
static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
{
struct emac_rxch *rxch = priv->rxch[ch];
struct emac_rx_bd __iomem *curr_bd;
if (rxch) {
/* free the receive buffers previously allocated */
curr_bd = rxch->active_queue_head;
while (curr_bd) {
if (curr_bd->buf_token) {
dma_unmap_single(&priv->ndev->dev,
curr_bd->buff_ptr,
curr_bd->off_b_len
& EMAC_RX_BD_BUF_SIZE,
DMA_FROM_DEVICE);
dev_kfree_skb_any((struct sk_buff *)\
curr_bd->buf_token);
}
curr_bd = curr_bd->next;
}
if (rxch->bd_mem)
rxch->bd_mem = NULL;
kfree(rxch);
priv->rxch[ch] = NULL;
}
}
/**
* emac_set_type0addr: Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
......@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
{
struct emac_priv *priv = netdev_priv(ndev);
struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
......@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
/* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
}
if (netif_msg_drv(priv))
......@@ -1973,194 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
return 0;
}
/**
* emac_addbd_to_rx_queue: Recycle RX buffer descriptor
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number to process buffer descriptors for
* @curr_bd: current buffer descriptor
* @buffer: buffer pointer for descriptor
* @buf_token: buffer token (stores skb information)
*
* Prepares the recycled buffer descriptor and adds it to the hardware
* receive queue - if the queue is empty this descriptor becomes the head,
* else adds the descriptor to the end of the queue
*
*/
static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
struct emac_rx_bd __iomem *curr_bd,
char *buffer, void *buf_token)
{
struct emac_rxch *rxch = priv->rxch[ch];
/* populate the hardware descriptor */
curr_bd->h_next = 0;
curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
rxch->buf_size, DMA_FROM_DEVICE);
curr_bd->off_b_len = rxch->buf_size;
curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
curr_bd->next = NULL;
curr_bd->data_ptr = buffer;
curr_bd->buf_token = buf_token;
/* write back */
BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
if (rxch->active_queue_head == NULL) {
rxch->active_queue_head = curr_bd;
rxch->active_queue_tail = curr_bd;
if (0 != rxch->queue_active) {
emac_write(EMAC_RXHDP(ch),
emac_virt_to_phys(rxch->active_queue_head, priv));
rxch->queue_active = 1;
}
} else {
struct emac_rx_bd __iomem *tail_bd;
u32 frame_status;
tail_bd = rxch->active_queue_tail;
rxch->active_queue_tail = curr_bd;
tail_bd->next = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
emac_write(EMAC_RXHDP(ch),
emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++rxch->end_of_queue_add;
}
}
++rxch->recycled_bd;
}
/**
* emac_net_rx_cb: Prepares packet and sends to upper layer
* @priv: The DaVinci EMAC private adapter structure
* @net_pkt_list: Network packet list (received packets)
*
* Invalidates packet buffer memory and sends the received packet to upper
* layer
*
* Returns success or appropriate error code (none as of now)
*/
static int emac_net_rx_cb(struct emac_priv *priv,
struct emac_netpktobj *net_pkt_list)
{
struct net_device *ndev = priv->ndev;
struct sk_buff *p_skb = net_pkt_list->pkt_token;
/* set length of packet */
skb_put(p_skb, net_pkt_list->pkt_length);
p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
netif_receive_skb(p_skb);
ndev->stats.rx_bytes += net_pkt_list->pkt_length;
ndev->stats.rx_packets++;
return 0;
}
/**
* emac_rx_bdproc: RX buffer descriptor (packet) processing
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number to process buffer descriptors for
* @budget: number of packets allowed to process
* @pending: indication to caller that packets are pending to process
*
* Processes RX buffer descriptors - checks ownership bit on the RX buffer
* descriptor, sends the receive packet to upper layer, allocates a new SKB
* and recycles the buffer descriptor (requeues it in hardware RX queue).
* Only "budget" number of packets are processed and indication of pending
* packets provided to the caller.
*
* Returns number of packets processed (and indication of pending packets)
*/
static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
{
unsigned long flags;
u32 frame_status;
u32 pkts_processed = 0;
char *new_buffer;
struct emac_rx_bd __iomem *curr_bd;
struct emac_rx_bd __iomem *last_bd;
struct emac_netpktobj *curr_pkt, pkt_obj;
struct emac_netbufobj buf_obj;
struct emac_netbufobj *rx_buf_obj;
void *new_buf_token;
struct emac_rxch *rxch = priv->rxch[ch];
if (unlikely(1 == rxch->teardown_pending))
return 0;
++rxch->proc_count;
spin_lock_irqsave(&priv->rx_lock, flags);
pkt_obj.buf_list = &buf_obj;
curr_pkt = &pkt_obj;
curr_bd = rxch->active_queue_head;
BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
frame_status = curr_bd->mode;
while ((curr_bd) &&
((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
(pkts_processed < budget)) {
new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
&new_buf_token, EMAC_DEF_RX_CH);
if (unlikely(NULL == new_buffer)) {
++rxch->out_of_rx_buffers;
goto end_emac_rx_bdproc;
}
/* populate received packet data structure */
rx_buf_obj = &curr_pkt->buf_list[0];
rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
rx_buf_obj->buf_token = curr_bd->buf_token;
dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
DMA_FROM_DEVICE);
curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
curr_pkt->num_bufs = 1;
curr_pkt->pkt_length =
(frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
++rxch->processed_bd;
last_bd = curr_bd;
curr_bd = last_bd->next;
rxch->active_queue_head = curr_bd;
/* check if end of RX queue ? */
if (frame_status & EMAC_CPPI_EOQ_BIT) {
if (curr_bd) {
++rxch->mis_queued_packets;
emac_write(EMAC_RXHDP(ch),
emac_virt_to_phys(curr_bd, priv));
} else {
++rxch->end_of_queue;
rxch->queue_active = 0;
}
}
/* recycle BD */
emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
new_buf_token);
/* return the packet to the user - BD ptr passed in
* last parameter for potential *future* use */
spin_unlock_irqrestore(&priv->rx_lock, flags);
emac_net_rx_cb(priv, curr_pkt);
spin_lock_irqsave(&priv->rx_lock, flags);
curr_bd = rxch->active_queue_head;
if (curr_bd) {
BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
frame_status = curr_bd->mode;
}
++pkts_processed;
}
end_emac_rx_bdproc:
spin_unlock_irqrestore(&priv->rx_lock, flags);
return pkts_processed;
}
/**
* emac_hw_enable: Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
......@@ -2172,7 +1276,7 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
*/
static int emac_hw_enable(struct emac_priv *priv)
{
u32 ch, val, mbp_enable, mac_control;
u32 val, mbp_enable, mac_control;
/* Soft reset */
emac_write(EMAC_SOFTRESET, 1);
......@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
val = emac_read(EMAC_TXCONTROL);
val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
emac_write(EMAC_TXCONTROL, val);
val = emac_read(EMAC_RXCONTROL);
val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
emac_write(EMAC_RXCONTROL, val);
emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
emac_write(EMAC_TXHDP(ch), 0);
emac_write(EMAC_TXINTMASKSET, BIT(ch));
}
for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
struct emac_rxch *rxch = priv->rxch[ch];
emac_setmac(priv, ch, rxch->mac_addr);
emac_write(EMAC_RXINTMASKSET, BIT(ch));
rxch->queue_active = 1;
emac_write(EMAC_RXHDP(ch),
emac_virt_to_phys(rxch->active_queue_head, priv));
}
emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
/* Enable MII */
val = emac_read(EMAC_MACCONTROL);
......@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
EMAC_DEF_TX_MAX_SERVICE);
num_tx_pkts = cpdma_chan_process(priv->txchan,
EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
......@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
if (status & mask) {
num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
} /* RX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
......@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
}
#endif
/* PHY/MII bus related */
/* Wait until mdio is ready for next command */
#define MDIO_WAIT_FOR_USER_ACCESS\
while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
MDIO_USERACCESS_GO) != 0)
static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
unsigned int phy_data = 0;
unsigned int phy_control;
/* Wait until mdio is ready for next command */
MDIO_WAIT_FOR_USER_ACCESS;
phy_control = (MDIO_USERACCESS_GO |
MDIO_USERACCESS_READ |
((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
(phy_data & MDIO_USERACCESS_DATA));
emac_mdio_write(MDIO_USERACCESS(0), phy_control);
/* Wait until mdio is ready for next command */
MDIO_WAIT_FOR_USER_ACCESS;
return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
}
static int emac_mii_write(struct mii_bus *bus, int phy_id,
int phy_reg, u16 phy_data)
{
unsigned int control;
/* until mdio is ready for next command */
MDIO_WAIT_FOR_USER_ACCESS;
control = (MDIO_USERACCESS_GO |
MDIO_USERACCESS_WRITE |
((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
(phy_data & MDIO_USERACCESS_DATA));
emac_mdio_write(MDIO_USERACCESS(0), control);
return 0;
}
static int emac_mii_reset(struct mii_bus *bus)
{
unsigned int clk_div;
int mdio_bus_freq = emac_bus_frequency;
if (mdio_max_freq && mdio_bus_freq)
clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
else
clk_div = 0xFF;
clk_div &= MDIO_CONTROL_CLKDIV;
/* Set enable and clock divider in MDIOControl */
emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
return 0;
}
static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
/* emac_driver: EMAC MII bus structure */
static struct mii_bus *emac_mii;
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
......@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
static int match_first_device(struct device *dev, void *data)
{
return 1;
}
/**
* emac_dev_open: EMAC device open
* @ndev: The DaVinci EMAC network adapter
......@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
u32 rc, cnt, ch;
int phy_addr;
u32 cnt;
struct resource *res;
int q, m;
int q, m, ret;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
......@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
/* Clear basic hardware */
for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
emac_write(EMAC_TXHDP(ch), 0);
emac_write(EMAC_RXHDP(ch), 0);
emac_write(EMAC_RXHDP(ch), 0);
emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
}
priv->mac_hash1 = 0;
priv->mac_hash2 = 0;
emac_write(EMAC_MACHASH1, 0);
emac_write(EMAC_MACHASH2, 0);
/* multi ch not supported - open 1 TX, 1RX ch by default */
rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
if (0 != rc) {
dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
return rc;
}
rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
if (0 != rc) {
dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
return rc;
for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
struct sk_buff *skb = emac_rx_alloc(priv);
if (!skb)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
skb_tailroom(skb), GFP_KERNEL);
if (WARN_ON(ret < 0))
break;
}
/* Request IRQ */
......@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
emac_set_coalesce(ndev, &coal);
}
/* find the first phy */
cpdma_ctlr_start(priv->dma);
priv->phydev = NULL;
if (priv->phy_mask) {
emac_mii_reset(priv->mii_bus);
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
if (priv->mii_bus->phy_map[phy_addr]) {
priv->phydev = priv->mii_bus->phy_map[phy_addr];
break;
}
}
/* use the first phy on the bus if pdata did not give us a phy id */
if (!priv->phy_id) {
struct device *phy;
if (!priv->phydev) {
printk(KERN_ERR "%s: no PHY found\n", ndev->name);
return -1;
}
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
match_first_device);
if (phy)
priv->phy_id = dev_name(phy);
}
priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev),
&emac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
if (priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n",
ndev->name);
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
priv->phydev = NULL;
return PTR_ERR(priv->phydev);
}
......@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
printk(KERN_INFO "%s: attached PHY driver [%s] "
"(mii_bus:phy_addr=%s, id=%x)\n", ndev->name,
dev_info(emac_dev, "attached PHY driver [%s] "
"(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
} else{
} else {
/* No PHY, fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
priv->speed = SPEED_100;
priv->duplex = DUPLEX_FULL;
......@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
if (priv->phy_mask)
if (priv->phydev)
phy_start(priv->phydev);
return 0;
......@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
netif_carrier_off(ndev);
emac_int_disable(priv);
emac_stop_txch(priv, EMAC_DEF_TX_CH);
emac_stop_rxch(priv, EMAC_DEF_RX_CH);
emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
if (priv->phydev)
......@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev;
struct emac_priv *priv;
unsigned long size;
unsigned long size, hw_ram_addr;
struct emac_platform_data *pdata;
struct device *emac_dev;
struct cpdma_params dma_params;
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
......@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
spin_lock_init(&priv->tx_lock);
spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
pdata = pdev->dev.platform_data;
......@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* MAC addr and PHY mask , RMII enable info from platform_data */
memcpy(priv->mac_addr, pdata->mac_addr, 6);
priv->phy_mask = pdata->phy_mask;
priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
priv->int_enable = pdata->interrupt_enable;
......@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev->base_addr = (unsigned long)priv->remap_addr;
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
priv->ctrl_ram_size = pdata->ctrl_ram_size;
priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
if (pdata->hw_ram_addr)
priv->hw_ram_addr = pdata->hw_ram_addr;
else
priv->hw_ram_addr = (u32 __force)res->start +
pdata->ctrl_ram_offset;
hw_ram_addr = pdata->hw_ram_addr;
if (!hw_ram_addr)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
memset(&dma_params, 0, sizeof(dma_params));
dma_params.dev = emac_dev;
dma_params.dmaregs = priv->emac_base;
dma_params.rxthresh = priv->emac_base + 0x120;
dma_params.rxfree = priv->emac_base + 0x140;
dma_params.txhdp = priv->emac_base + 0x600;
dma_params.rxhdp = priv->emac_base + 0x620;
dma_params.txcp = priv->emac_base + 0x640;
dma_params.rxcp = priv->emac_base + 0x660;
dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
dma_params.desc_mem_phys = hw_ram_addr;
dma_params.desc_mem_size = pdata->ctrl_ram_size;
dma_params.desc_align = 16;
priv->dma = cpdma_ctlr_create(&dma_params);
if (!priv->dma) {
dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
rc = -ENOMEM;
goto no_dma;
}
priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
emac_tx_handler);
priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
emac_rx_handler);
if (WARN_ON(!priv->txchan || !priv->rxchan)) {
rc = -ENOMEM;
goto no_irq_res;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
......@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
/* MII/Phy initialisation, mdio bus registration */
emac_mii = mdiobus_alloc();
if (emac_mii == NULL) {
dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
rc = -ENOMEM;
goto mdio_alloc_err;
}
priv->mii_bus = emac_mii;
emac_mii->name = "emac-mii",
emac_mii->read = emac_mii_read,
emac_mii->write = emac_mii_write,
emac_mii->reset = emac_mii_reset,
emac_mii->irq = mii_irqs,
emac_mii->phy_mask = ~(priv->phy_mask);
emac_mii->parent = &pdev->dev;
emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
mdio_max_freq = pdata->mdio_max_freq;
emac_mii->reset(emac_mii);
/* Register the MII bus */
rc = mdiobus_register(emac_mii);
if (rc)
goto mdiobus_quit;
if (netif_msg_probe(priv)) {
dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
"(regs: %p, irq: %d)\n",
......@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
return 0;
mdiobus_quit:
mdiobus_free(emac_mii);
netdev_reg_err:
mdio_alloc_err:
clk_disable(emac_clk);
no_irq_res:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
if (priv->rxchan)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
no_dma:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
iounmap(priv->remap_addr);
......@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
if (priv->rxchan)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
release_mem_region(res->start, res->end - res->start + 1);
......
/*
* DaVinci MDIO Module driver
*
* Copyright (C) 2010 Texas Instruments.
*
* Shamelessly ripped out of davinci_emac.c, original copyrights follow:
*
* Copyright (C) 2009 Texas Instruments.
*
* ---------------------------------------------------------------------------
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* ---------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/davinci_emac.h>
/*
* This timeout definition is a worst-case ultra defensive measure against
* unexpected controller lock ups. Ideally, we should never ever hit this
* scenario in practice.
*/
#define MDIO_TIMEOUT 100 /* msecs */
#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
struct davinci_mdio_regs {
u32 version;
u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
#define CONTROL_MAX_DIV (0xff)
u32 alive;
u32 link;
u32 linkintraw;
u32 linkintmasked;
u32 __reserved_0[2];
u32 userintraw;
u32 userintmasked;
u32 userintmaskset;
u32 userintmaskclr;
u32 __reserved_1[20];
struct {
u32 access;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)
u32 physel;
} user[0];
};
struct mdio_platform_data default_pdata = {
.bus_freq = DEF_OUT_FREQ,
};
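/*
 * Illustrative board-level sketch (not part of this file): how a machine
 * file might override the default and hand the driver its bus frequency.
 * The "davinci_mdio" device name and the 2.2 MHz figure are assumptions
 * made for the example.
 */
#if 0	/* example only */
static struct mdio_platform_data my_mdio_pdata = {
	.bus_freq	= 2200000,	/* 2.2 MHz MDIO clock target */
};

static struct platform_device my_mdio_device = {
	.name			= "davinci_mdio",
	.id			= 0,
	.dev.platform_data	= &my_mdio_pdata,
};
#endif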
struct davinci_mdio_data {
struct mdio_platform_data pdata;
struct davinci_mdio_regs __iomem *regs;
spinlock_t lock;
struct clk *clk;
struct device *dev;
struct mii_bus *bus;
bool suspended;
unsigned long access_time; /* jiffies */
};
static void __davinci_mdio_reset(struct davinci_mdio_data *data)
{
u32 mdio_in, div, mdio_out_khz, access_time;
mdio_in = clk_get_rate(data->clk);
div = (mdio_in / data->pdata.bus_freq) - 1;
if (div > CONTROL_MAX_DIV)
div = CONTROL_MAX_DIV;
/* set enable and clock divider */
__raw_writel(div | CONTROL_ENABLE, &data->regs->control);
/*
* One mdio transaction consists of:
* 32 bits of preamble
* 32 bits of transferred data
* 24 bits of bus yield (not needed unless shared?)
*/
mdio_out_khz = mdio_in / (1000 * (div + 1));
access_time = (88 * 1000) / mdio_out_khz;
/*
* In the worst case, we could be kicking off a user-access immediately
* after the mdio bus scan state-machine triggered its own read. If
* so, our request could get deferred by one access cycle. We
* defensively allow for 4 access cycles.
*/
data->access_time = usecs_to_jiffies(access_time * 4);
if (!data->access_time)
data->access_time = 1;
}
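/*
 * Worked example (hypothetical clock rate): with a 100 MHz functional
 * clock and the default 2.2 MHz bus frequency, div = (100000000 / 2200000)
 * - 1 = 44, mdio_out_khz = 100000000 / (1000 * 45) = 2222, and
 * access_time = (88 * 1000) / 2222 = 39 usecs, so the four access cycles
 * allowed above come to roughly 160 usecs.
 */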
static int davinci_mdio_reset(struct mii_bus *bus)
{
struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
__davinci_mdio_reset(data);
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
/* dump hardware version info */
ver = __raw_readl(&data->regs->version);
dev_info(data->dev, "davinci mdio revision %d.%d\n",
(ver >> 8) & 0xff, ver & 0xff);
/* get phy mask from the alive register */
phy_mask = __raw_readl(&data->regs->alive);
if (phy_mask) {
/* restrict mdio bus to live phys only */
dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
phy_mask = ~phy_mask;
} else {
/* desperately scan all phys */
dev_warn(data->dev, "no live phy, scanning all\n");
phy_mask = 0;
}
data->bus->phy_mask = phy_mask;
return 0;
}
/* wait until hardware is ready for another user access */
static inline int wait_for_user_access(struct davinci_mdio_data *data)
{
struct davinci_mdio_regs __iomem *regs = data->regs;
unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
u32 reg;
while (time_after(timeout, jiffies)) {
reg = __raw_readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
reg = __raw_readl(&regs->control);
if ((reg & CONTROL_IDLE) == 0)
continue;
/*
* An emac soft_reset may have clobbered the mdio controller's
* state machine. We need to reset and retry the current
* operation
*/
dev_warn(data->dev, "resetting idled controller\n");
__davinci_mdio_reset(data);
return -EAGAIN;
}
dev_err(data->dev, "timed out waiting for user access\n");
return -ETIMEDOUT;
}
/* wait until hardware state machine is idle */
static inline int wait_for_idle(struct davinci_mdio_data *data)
{
struct davinci_mdio_regs __iomem *regs = data->regs;
unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
while (time_after(timeout, jiffies)) {
if (__raw_readl(&regs->control) & CONTROL_IDLE)
return 0;
}
dev_err(data->dev, "timed out waiting for idle\n");
return -ETIMEDOUT;
}
static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
{
struct davinci_mdio_data *data = bus->priv;
u32 reg;
int ret;
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
spin_lock(&data->lock);
if (data->suspended) {
spin_unlock(&data->lock);
return -ENODEV;
}
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
(phy_id << 16));
while (1) {
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
continue;
if (ret < 0)
break;
__raw_writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
continue;
if (ret < 0)
break;
reg = __raw_readl(&data->regs->user[0].access);
ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
break;
}
spin_unlock(&data->lock);
return ret;
}
static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
int phy_reg, u16 phy_data)
{
struct davinci_mdio_data *data = bus->priv;
u32 reg;
int ret;
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
spin_lock(&data->lock);
if (data->suspended) {
spin_unlock(&data->lock);
return -ENODEV;
}
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
(phy_id << 16) | (phy_data & USERACCESS_DATA));
while (1) {
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
continue;
if (ret < 0)
break;
__raw_writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
continue;
break;
}
spin_unlock(&data->lock);
return ret;
}
static int __devinit davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data;
struct resource *res;
struct phy_device *phy;
int ret, addr;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(dev, "failed to alloc device data\n");
return -ENOMEM;
}
data->pdata = pdata ? (*pdata) : default_pdata;
data->bus = mdiobus_alloc();
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
ret = -ENOMEM;
goto bail_out;
}
data->bus->name = dev_name(dev);
data->bus->read = davinci_mdio_read;
data->bus->write = davinci_mdio_write;
data->bus->reset = davinci_mdio_reset;
data->bus->parent = dev;
data->bus->priv = data;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
data->clk = clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
data->clk = NULL;
goto bail_out;
}
clk_enable(data->clk);
dev_set_drvdata(dev, data);
data->dev = dev;
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "could not find register map resource\n");
ret = -ENOENT;
goto bail_out;
}
res = devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(dev));
if (!res) {
dev_err(dev, "could not allocate register map resource\n");
ret = -ENXIO;
goto bail_out;
}
data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
if (!data->regs) {
dev_err(dev, "could not map mdio registers\n");
ret = -ENOMEM;
goto bail_out;
}
/* register the mii bus */
ret = mdiobus_register(data->bus);
if (ret)
goto bail_out;
/* scan and dump the bus */
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
phy = data->bus->phy_map[addr];
if (phy) {
dev_info(dev, "phy[%d]: device %s, driver %s\n",
phy->addr, dev_name(&phy->dev),
phy->drv ? phy->drv->name : "unknown");
}
}
return 0;
bail_out:
if (data->bus)
mdiobus_free(data->bus);
if (data->clk) {
clk_disable(data->clk);
clk_put(data->clk);
}
kfree(data);
return ret;
}
static int __devexit davinci_mdio_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data = dev_get_drvdata(dev);
if (data->bus)
mdiobus_free(data->bus);
if (data->clk) {
clk_disable(data->clk);
clk_put(data->clk);
}
dev_set_drvdata(dev, NULL);
kfree(data);
return 0;
}
static int davinci_mdio_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
u32 ctrl;
spin_lock(&data->lock);
/* shutdown the scan state machine */
ctrl = __raw_readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
if (data->clk)
clk_disable(data->clk);
data->suspended = true;
spin_unlock(&data->lock);
return 0;
}
static int davinci_mdio_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
u32 ctrl;
spin_lock(&data->lock);
if (data->clk)
clk_enable(data->clk);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
ctrl |= CONTROL_ENABLE;
__raw_writel(ctrl, &data->regs->control);
data->suspended = false;
spin_unlock(&data->lock);
return 0;
}
static const struct dev_pm_ops davinci_mdio_pm_ops = {
.suspend = davinci_mdio_suspend,
.resume = davinci_mdio_resume,
};
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
.owner = THIS_MODULE,
.pm = &davinci_mdio_pm_ops,
},
.probe = davinci_mdio_probe,
.remove = __devexit_p(davinci_mdio_remove),
};
static int __init davinci_mdio_init(void)
{
return platform_driver_register(&davinci_mdio_driver);
}
device_initcall(davinci_mdio_init);
static void __exit davinci_mdio_exit(void)
{
platform_driver_unregister(&davinci_mdio_driver);
}
module_exit(davinci_mdio_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DaVinci MDIO driver");
......@@ -14,16 +14,26 @@
#include <linux/if_ether.h>
#include <linux/memory.h>
struct mdio_platform_data {
unsigned long bus_freq;
};
struct emac_platform_data {
char mac_addr[ETH_ALEN];
u32 ctrl_reg_offset;
u32 ctrl_mod_reg_offset;
u32 ctrl_ram_offset;
u32 hw_ram_addr;
u32 mdio_reg_offset;
u32 ctrl_ram_size;
u32 phy_mask;
u32 mdio_max_freq;
/*
* phy_id can be one of the following:
* - NULL : use the first phy on the bus
* - "" : force to 100/full, no mdio control
* - "<bus>:<addr>" : use the specified bus and phy
* (a board-file usage sketch follows this hunk)
*/
const char *phy_id;
u8 rmii_en;
u8 version;
void (*interrupt_enable) (void);
......
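For reference, a minimal board-file sketch of how the new "davinci_mdio" platform device and the EMAC phy_id string fit together. The register base, device id and PHY address below are placeholders rather than values from any real board; with this driver the mii bus id is simply the platform device id printed in hex, so a PHY at address 3 on bus 0 would be named "0:03".

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/davinci_emac.h>

/* 2.2 MHz MDIO clock, same as the driver's built-in default */
static struct mdio_platform_data board_mdio_pdata = {
	.bus_freq	= 2200000,
};

static struct resource board_mdio_resources[] = {
	{
		.start	= 0x01e24000,		/* placeholder MDIO base */
		.end	= 0x01e24000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device board_mdio_device = {
	.name		= "davinci_mdio",
	.id		= 0,			/* becomes mii bus id "0" */
	.num_resources	= ARRAY_SIZE(board_mdio_resources),
	.resource	= board_mdio_resources,
	.dev		= {
		.platform_data	= &board_mdio_pdata,
	},
};

static struct emac_platform_data board_emac_pdata = {
	.phy_id		= "0:03",	/* <bus>:<addr> - PHY 3 on mii bus "0" */
	/* remaining fields as required by the board */
};

A board init function would then register board_mdio_device with platform_device_register() and hand board_emac_pdata to the EMAC device; those details vary per SoC and are omitted here.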