Commit 2098c18d authored by Krzysztof Hałasa

IXP4xx: Add PHYLIB support to Ethernet driver.

Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
parent b4c7d3b0
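
The patch replaces the driver's private MII handling (mdio_thread, phy_check_media, phy_reset) with the generic phylib framework: register an MDIO bus whose read/write callbacks drive the NPE-B MDIO registers, connect each port's net_device to its PHY with an adjust_link callback, and start/stop the PHY in open/close. The sketch below condenses that flow using only calls that appear in the diff; the helper name example_phylib_setup is hypothetical and error handling is simplified, so treat it as an outline of the pattern rather than the driver code itself.

/*
 * Hypothetical outline of the phylib flow this patch adopts (not driver code).
 * Assumes the patch's globals and handlers: mdio_bus, ixp4xx_mdio_read/write,
 * ixp4xx_adjust_link, and the 2.6.29-era <linux/phy.h> API.
 */
static int example_phylib_setup(struct net_device *dev, int phy_addr)
{
	char phy_id[BUS_ID_SIZE];
	struct phy_device *phydev;
	int err;

	/* 1. Register the MDIO bus once (done in ixp4xx_mdio_register()). */
	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;	/* both wrap ixp4xx_mdio_cmd() */
	mdio_bus->write = &ixp4xx_mdio_write;
	strcpy(mdio_bus->id, "0");
	if ((err = mdiobus_register(mdio_bus))) {
		mdiobus_free(mdio_bus);
		return err;
	}

	/* 2. Attach the port to its PHY (done in eth_init_one()); phylib will
	 *    call ixp4xx_adjust_link() on every link/speed/duplex change. */
	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", phy_addr);
	phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	phydev->irq = PHY_POLL;		/* no PHY interrupt line; poll */

	/* 3. Start the PHY state machine when the interface opens
	 *    (eth_open()); eth_close() calls phy_stop(). */
	phy_start(phydev);
	return 0;
}

With phylib's state machine handling link polling, auto-negotiation results and carrier/duplex updates, the driver's mdio_thread and the manual BMCR bookkeeping can be dropped, as the diff below does.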
@@ -59,7 +59,7 @@ config EP93XX_ETH
 config IXP4XX_ETH
 	tristate "Intel IXP4xx Ethernet support"
 	depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
-	select MII
+	select PHYLIB
 	help
 	  Say Y here if you want to use built-in Ethernet ports
 	  on IXP4xx processor.
@@ -30,7 +30,7 @@
 #include <linux/etherdevice.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <mach/npe.h>
 #include <mach/qmgr.h>
@@ -59,7 +59,6 @@
 #define NAPI_WEIGHT		16
 #define MDIO_INTERVAL		(3 * HZ)
 #define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
-#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
 #define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

 #define NPE_ID(port_id)		((port_id) >> 4)
@@ -164,14 +163,13 @@ struct port {
 	struct npe *npe;
 	struct net_device *netdev;
 	struct napi_struct napi;
-	struct mii_if_info mii;
-	struct delayed_work mdio_thread;
+	struct phy_device *phydev;
 	struct eth_plat_info *plat;
 	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
 	struct desc *desc_tab;	/* coherent */
 	u32 desc_tab_phys;
 	int id;			/* logical port ID */
-	u16 mii_bmcr;
+	int speed, duplex;
 };

 /* NPE message structure */
@@ -242,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 static spinlock_t mdio_lock;
 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+struct mii_bus *mdio_bus;
 static int ports_open;
 static struct port *npe_port_tab[MAX_NPES];
 static struct dma_pool *dma_pool;

-static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
-		    int write, u16 cmd)
+static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
+			   int write, u16 cmd)
 {
 	int cycles = 0;

 	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
+		return -1;
 	}

 	if (write) {
@@ -273,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
 	}

 	if (cycles == MAX_MDIO_RETRIES) {
-		printk(KERN_ERR "%s: MII write failed\n", dev->name);
-		return 0;
+		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
+		       phy_id);
+		return -1;
 	}

 #if DEBUG_MDIO
-	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
-	       cycles);
+	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
+	       phy_id, write ? "write" : "read", cycles);
 #endif

 	if (write)
 		return 0;

 	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
-		printk(KERN_ERR "%s: MII read failed\n", dev->name);
-		return 0;
+#if DEBUG_MDIO
+		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
+		       phy_id);
+#endif
+		return 0xFFFF; /* don't return error */
 	}

 	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
-		(__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
 }

-static int mdio_read(struct net_device *dev, int phy_id, int location)
+static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 {
 	unsigned long flags;
-	u16 val;
+	int ret;

 	spin_lock_irqsave(&mdio_lock, flags);
-	val = mdio_cmd(dev, phy_id, location, 0, 0);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
 	spin_unlock_irqrestore(&mdio_lock, flags);
-	return val;
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
+	       phy_id, location, ret);
+#endif
+	return ret;
 }

-static void mdio_write(struct net_device *dev, int phy_id, int location,
-		       int val)
+static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
+			     u16 val)
 {
 	unsigned long flags;
+	int ret;

 	spin_lock_irqsave(&mdio_lock, flags);
-	mdio_cmd(dev, phy_id, location, 1, val);
+	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
 	spin_unlock_irqrestore(&mdio_lock, flags);
+#if DEBUG_MDIO
+	printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
+	       bus->name, phy_id, location, val, ret);
+#endif
+	return ret;
 }

-static void phy_reset(struct net_device *dev, int phy_id)
-{
-	struct port *port = netdev_priv(dev);
-	int cycles = 0;
-
-	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
-
-	while (cycles < MAX_MII_RESET_RETRIES) {
-		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
-#if DEBUG_MDIO
-			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
-			       dev->name, cycles);
-#endif
-			return;
-		}
-		udelay(1);
-		cycles++;
-	}
-
-	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
-}
-
-static void eth_set_duplex(struct port *port)
-{
-	if (port->mii.full_duplex)
-		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
-	else
-		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
-			     &port->regs->tx_control[0]);
-}
-
-static void phy_check_media(struct port *port, int init)
-{
-	if (mii_check_media(&port->mii, 1, init))
-		eth_set_duplex(port);
-	if (port->mii.force_media) { /* mii_check_media() doesn't work */
-		struct net_device *dev = port->netdev;
-		int cur_link = mii_link_ok(&port->mii);
-		int prev_link = netif_carrier_ok(dev);
-
-		if (!prev_link && cur_link) {
-			printk(KERN_INFO "%s: link up\n", dev->name);
-			netif_carrier_on(dev);
-		} else if (prev_link && !cur_link) {
-			printk(KERN_INFO "%s: link down\n", dev->name);
-			netif_carrier_off(dev);
-		}
-	}
-}
-
-static void mdio_thread(struct work_struct *work)
-{
-	struct port *port = container_of(work, struct port, mdio_thread.work);
-
-	phy_check_media(port, 0);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+static int ixp4xx_mdio_register(void)
+{
+	int err;
+
+	if (!(mdio_bus = mdiobus_alloc()))
+		return -ENOMEM;
+
+	/* All MII PHY accesses use NPE-B Ethernet registers */
+	spin_lock_init(&mdio_lock);
+	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+	mdio_bus->name = "IXP4xx MII Bus";
+	mdio_bus->read = &ixp4xx_mdio_read;
+	mdio_bus->write = &ixp4xx_mdio_write;
+	strcpy(mdio_bus->id, "0");
+
+	if ((err = mdiobus_register(mdio_bus)))
+		mdiobus_free(mdio_bus);
+	return err;
+}
+
+static void ixp4xx_mdio_remove(void)
+{
+	mdiobus_unregister(mdio_bus);
+	mdiobus_free(mdio_bus);
+}
+
+static void ixp4xx_adjust_link(struct net_device *dev)
+{
+	struct port *port = netdev_priv(dev);
+	struct phy_device *phydev = port->phydev;
+
+	if (!phydev->link) {
+		if (port->speed) {
+			port->speed = 0;
+			printk(KERN_INFO "%s: link down\n", dev->name);
+		}
+		return;
+	}
+
+	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
+		return;
+
+	port->speed = phydev->speed;
+	port->duplex = phydev->duplex;
+
+	if (port->duplex)
+		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+	else
+		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+			     &port->regs->tx_control[0]);
+
+	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
+	       dev->name, port->speed, port->duplex ? "full" : "half");
 }
@@ -777,16 +788,9 @@ static void eth_set_mcast_list(struct net_device *dev)
 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
-	struct port *port = netdev_priv(dev);
-	unsigned int duplex_chg;
-	int err;
-
 	if (!netif_running(dev))
 		return -EINVAL;
-	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
-	if (duplex_chg)
-		eth_set_duplex(port);
-	return err;
+	return -EINVAL;
 }
@@ -938,8 +942,6 @@ static int eth_open(struct net_device *dev)
 		}
 	}

-	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
-
 	memset(&msg, 0, sizeof(msg));
 	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
 	msg.eth_id = port->id;
@@ -977,6 +979,9 @@ static int eth_open(struct net_device *dev)
 		return err;
 	}

+	port->speed = 0;	/* force "link up" message */
+	phy_start(port->phydev);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
 	__raw_writel(0x08, &port->regs->random_seed);
@@ -1004,10 +1009,8 @@ static int eth_open(struct net_device *dev)
 	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

 	napi_enable(&port->napi);
-	phy_check_media(port, 1);
 	eth_set_mcast_list(dev);
 	netif_start_queue(dev);
-	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);

 	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
 		     eth_rx_irq, dev);
@@ -1098,14 +1101,10 @@ static int eth_close(struct net_device *dev)
 		printk(KERN_CRIT "%s: unable to disable loopback\n",
 		       dev->name);

-	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
-			 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
-	mdio_write(dev, port->plat->phy, MII_BMCR,
-		   port->mii_bmcr | BMCR_PDOWN);
+	phy_stop(port->phydev);

 	if (!ports_open)
 		qmgr_disable_irq(TXDONE_QUEUE);
-	cancel_rearming_delayed_work(&port->mdio_thread);
 	destroy_queues(port);
 	release_queues(port);
 	return 0;
@@ -1117,6 +1116,7 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	struct net_device *dev;
 	struct eth_plat_info *plat = pdev->dev.platform_data;
 	u32 regs_phys;
+	char phy_id[BUS_ID_SIZE];
 	int err;

 	if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1182,22 +1182,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
 	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
 	udelay(50);

-	port->mii.dev = dev;
-	port->mii.mdio_read = mdio_read;
-	port->mii.mdio_write = mdio_write;
-	port->mii.phy_id = plat->phy;
-	port->mii.phy_id_mask = 0x1F;
-	port->mii.reg_num_mask = 0x1F;
+	snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
+	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+				   PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(port->phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(port->phydev);
+	}
+	port->phydev->irq = PHY_POLL;

 	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
 	       npe_name(port->npe));

-	phy_reset(dev, plat->phy);
-	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
-			 ~(BMCR_RESET | BMCR_PDOWN);
-	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
-
-	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
-
 	return 0;

 err_unreg:
@@ -1231,20 +1228,19 @@ static struct platform_driver ixp4xx_eth_driver = {
 static int __init eth_init_module(void)
 {
+	int err;
 	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
 		return -ENOSYS;

-	/* All MII PHY accesses use NPE-B Ethernet registers */
-	spin_lock_init(&mdio_lock);
-	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
-	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
-
+	if ((err = ixp4xx_mdio_register()))
+		return err;
 	return platform_driver_register(&ixp4xx_eth_driver);
 }

 static void __exit eth_cleanup_module(void)
 {
 	platform_driver_unregister(&ixp4xx_eth_driver);
+	ixp4xx_mdio_remove();
 }

 MODULE_AUTHOR("Krzysztof Halasa");