Commit 30b8817f authored by David S. Miller's avatar David S. Miller

Merge branch 'net-coding-style'

Weihang Li says:

====================
net: fix some coding style issues

Do some cleanups according to the coding style of kernel, including wrong
print type, redundant and missing spaces and so on.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 3cbf7530 44d043b5
...@@ -863,7 +863,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) ...@@ -863,7 +863,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) { if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device, netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
mmio_read->seq_num, offset, read_resp->req_id, mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off); read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT; ret = ENA_MMIO_READ_TIMEOUT;
...@@ -2396,7 +2396,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, ...@@ -2396,7 +2396,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (key) { if (key) {
if (key_len != sizeof(hash_key->key)) { if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device, netdev_err(ena_dev->net_device,
"key len (%hu) doesn't equal the supported size (%zu)\n", "key len (%u) doesn't equal the supported size (%zu)\n",
key_len, sizeof(hash_key->key)); key_len, sizeof(hash_key->key));
return -EINVAL; return -EINVAL;
} }
......
...@@ -3975,7 +3975,7 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, ...@@ -3975,7 +3975,7 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for for mgmnt and 1 IRQs for each IO direction */ /* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
if (unlikely(!max_num_io_queues)) { if (unlikely(!max_num_io_queues)) {
dev_err(&pdev->dev, "The device doesn't have io queues\n"); dev_err(&pdev->dev, "The device doesn't have io queues\n");
......
...@@ -100,19 +100,19 @@ static int amd8111e_read_phy(struct amd8111e_priv *lp, ...@@ -100,19 +100,19 @@ static int amd8111e_read_phy(struct amd8111e_priv *lp,
{ {
void __iomem *mmio = lp->mmio; void __iomem *mmio = lp->mmio;
unsigned int reg_val; unsigned int reg_val;
unsigned int repeat= REPEAT_CNT; unsigned int repeat = REPEAT_CNT;
reg_val = readl(mmio + PHY_ACCESS); reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE) while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS ); reg_val = readl(mmio + PHY_ACCESS);
writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) | writel(PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16), mmio +PHY_ACCESS); ((reg & 0x1f) << 16), mmio + PHY_ACCESS);
do{ do {
reg_val = readl(mmio + PHY_ACCESS); reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write data */ udelay(30); /* It takes 30 us to read/write data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE)); } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR) if (reg_val & PHY_RD_ERR)
goto err_phy_read; goto err_phy_read;
*val = reg_val & 0xffff; *val = reg_val & 0xffff;
...@@ -133,17 +133,17 @@ static int amd8111e_write_phy(struct amd8111e_priv *lp, ...@@ -133,17 +133,17 @@ static int amd8111e_write_phy(struct amd8111e_priv *lp,
reg_val = readl(mmio + PHY_ACCESS); reg_val = readl(mmio + PHY_ACCESS);
while (reg_val & PHY_CMD_ACTIVE) while (reg_val & PHY_CMD_ACTIVE)
reg_val = readl( mmio + PHY_ACCESS ); reg_val = readl(mmio + PHY_ACCESS);
writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) | writel(PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS); ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
do{ do {
reg_val = readl(mmio + PHY_ACCESS); reg_val = readl(mmio + PHY_ACCESS);
udelay(30); /* It takes 30 us to read/write the data */ udelay(30); /* It takes 30 us to read/write the data */
} while (--repeat && (reg_val & PHY_CMD_ACTIVE)); } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
if(reg_val & PHY_RD_ERR) if (reg_val & PHY_RD_ERR)
goto err_phy_write; goto err_phy_write;
return 0; return 0;
...@@ -159,7 +159,7 @@ static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num) ...@@ -159,7 +159,7 @@ static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int reg_val; unsigned int reg_val;
amd8111e_read_phy(lp,phy_id,reg_num,&reg_val); amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
return reg_val; return reg_val;
} }
...@@ -179,17 +179,17 @@ static void amd8111e_mdio_write(struct net_device *dev, ...@@ -179,17 +179,17 @@ static void amd8111e_mdio_write(struct net_device *dev,
static void amd8111e_set_ext_phy(struct net_device *dev) static void amd8111e_set_ext_phy(struct net_device *dev)
{ {
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
u32 bmcr,advert,tmp; u32 bmcr, advert, tmp;
/* Determine mii register values to set the speed */ /* Determine mii register values to set the speed */
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE); advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option){ switch (lp->ext_phy_option) {
default: default:
case SPEED_AUTONEG: /* advertise all values */ case SPEED_AUTONEG: /* advertise all values */
tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL| tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF|ADVERTISE_100FULL) ; ADVERTISE_100HALF | ADVERTISE_100FULL);
break; break;
case SPEED10_HALF: case SPEED10_HALF:
tmp |= ADVERTISE_10HALF; tmp |= ADVERTISE_10HALF;
...@@ -224,20 +224,20 @@ static int amd8111e_free_skbs(struct net_device *dev) ...@@ -224,20 +224,20 @@ static int amd8111e_free_skbs(struct net_device *dev)
int i; int i;
/* Freeing transmit skbs */ /* Freeing transmit skbs */
for(i = 0; i < NUM_TX_BUFFERS; i++){ for (i = 0; i < NUM_TX_BUFFERS; i++) {
if(lp->tx_skbuff[i]){ if (lp->tx_skbuff[i]) {
dma_unmap_single(&lp->pci_dev->dev, dma_unmap_single(&lp->pci_dev->dev,
lp->tx_dma_addr[i], lp->tx_dma_addr[i],
lp->tx_skbuff[i]->len, DMA_TO_DEVICE); lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb (lp->tx_skbuff[i]); dev_kfree_skb(lp->tx_skbuff[i]);
lp->tx_skbuff[i] = NULL; lp->tx_skbuff[i] = NULL;
lp->tx_dma_addr[i] = 0; lp->tx_dma_addr[i] = 0;
} }
} }
/* Freeing previously allocated receive buffers */ /* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++){ for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i]; rx_skbuff = lp->rx_skbuff[i];
if(rx_skbuff != NULL){ if (rx_skbuff != NULL) {
dma_unmap_single(&lp->pci_dev->dev, dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i], lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE); lp->rx_buff_len - 2, DMA_FROM_DEVICE);
...@@ -258,13 +258,13 @@ static inline void amd8111e_set_rx_buff_len(struct net_device *dev) ...@@ -258,13 +258,13 @@ static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
unsigned int mtu = dev->mtu; unsigned int mtu = dev->mtu;
if (mtu > ETH_DATA_LEN){ if (mtu > ETH_DATA_LEN) {
/* MTU + ethernet header + FCS /* MTU + ethernet header + FCS
* + optional VLAN tag + skb reserve space 2 * + optional VLAN tag + skb reserve space 2
*/ */
lp->rx_buff_len = mtu + ETH_HLEN + 10; lp->rx_buff_len = mtu + ETH_HLEN + 10;
lp->options |= OPTION_JUMBO_ENABLE; lp->options |= OPTION_JUMBO_ENABLE;
} else{ } else {
lp->rx_buff_len = PKT_BUFF_SZ; lp->rx_buff_len = PKT_BUFF_SZ;
lp->options &= ~OPTION_JUMBO_ENABLE; lp->options &= ~OPTION_JUMBO_ENABLE;
} }
...@@ -285,11 +285,11 @@ static int amd8111e_init_ring(struct net_device *dev) ...@@ -285,11 +285,11 @@ static int amd8111e_init_ring(struct net_device *dev)
lp->tx_ring_idx = 0; lp->tx_ring_idx = 0;
if(lp->opened) if (lp->opened)
/* Free previously allocated transmit and receive skbs */ /* Free previously allocated transmit and receive skbs */
amd8111e_free_skbs(dev); amd8111e_free_skbs(dev);
else{ else {
/* allocate the tx and rx descriptors */ /* allocate the tx and rx descriptors */
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
...@@ -313,11 +313,11 @@ static int amd8111e_init_ring(struct net_device *dev) ...@@ -313,11 +313,11 @@ static int amd8111e_init_ring(struct net_device *dev)
lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len); lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
if (!lp->rx_skbuff[i]) { if (!lp->rx_skbuff[i]) {
/* Release previos allocated skbs */ /* Release previos allocated skbs */
for(--i; i >= 0 ;i--) for (--i; i >= 0; i--)
dev_kfree_skb(lp->rx_skbuff[i]); dev_kfree_skb(lp->rx_skbuff[i]);
goto err_free_rx_ring; goto err_free_rx_ring;
} }
skb_reserve(lp->rx_skbuff[i],2); skb_reserve(lp->rx_skbuff[i], 2);
} }
/* Initilaizing receive descriptors */ /* Initilaizing receive descriptors */
for (i = 0; i < NUM_RX_BUFFERS; i++) { for (i = 0; i < NUM_RX_BUFFERS; i++) {
...@@ -375,40 +375,40 @@ static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod) ...@@ -375,40 +375,40 @@ static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
case RX_INTR_COAL : case RX_INTR_COAL :
timeout = coal_conf->rx_timeout; timeout = coal_conf->rx_timeout;
event_count = coal_conf->rx_event_count; event_count = coal_conf->rx_event_count;
if( timeout > MAX_TIMEOUT || if (timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT ) event_count > MAX_EVENT_COUNT)
return -EINVAL; return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV; timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN, mmio+INTEN0); writel(VAL0|STINTEN, mmio+INTEN0);
writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout, writel((u32)DLY_INT_A_R0 | (event_count << 16) |
mmio+DLY_INT_A); timeout, mmio + DLY_INT_A);
break; break;
case TX_INTR_COAL : case TX_INTR_COAL:
timeout = coal_conf->tx_timeout; timeout = coal_conf->tx_timeout;
event_count = coal_conf->tx_event_count; event_count = coal_conf->tx_event_count;
if( timeout > MAX_TIMEOUT || if (timeout > MAX_TIMEOUT ||
event_count > MAX_EVENT_COUNT ) event_count > MAX_EVENT_COUNT)
return -EINVAL; return -EINVAL;
timeout = timeout * DELAY_TIMER_CONV; timeout = timeout * DELAY_TIMER_CONV;
writel(VAL0|STINTEN,mmio+INTEN0); writel(VAL0 | STINTEN, mmio + INTEN0);
writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout, writel((u32)DLY_INT_B_T0 | (event_count << 16) |
mmio+DLY_INT_B); timeout, mmio + DLY_INT_B);
break; break;
case DISABLE_COAL: case DISABLE_COAL:
writel(0,mmio+STVAL); writel(0, mmio + STVAL);
writel(STINTEN, mmio+INTEN0); writel(STINTEN, mmio + INTEN0);
writel(0, mmio +DLY_INT_B); writel(0, mmio + DLY_INT_B);
writel(0, mmio+DLY_INT_A); writel(0, mmio + DLY_INT_A);
break; break;
case ENABLE_COAL: case ENABLE_COAL:
/* Start the timer */ /* Start the timer */
writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */ writel((u32)SOFT_TIMER_FREQ, mmio + STVAL); /* 0.5 sec */
writel(VAL0|STINTEN, mmio+INTEN0); writel(VAL0 | STINTEN, mmio + INTEN0);
break; break;
default: default:
break; break;
...@@ -423,67 +423,67 @@ static int amd8111e_restart(struct net_device *dev) ...@@ -423,67 +423,67 @@ static int amd8111e_restart(struct net_device *dev)
{ {
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio; void __iomem *mmio = lp->mmio;
int i,reg_val; int i, reg_val;
/* stop the chip */ /* stop the chip */
writel(RUN, mmio + CMD0); writel(RUN, mmio + CMD0);
if(amd8111e_init_ring(dev)) if (amd8111e_init_ring(dev))
return -ENOMEM; return -ENOMEM;
/* enable the port manager and set auto negotiation always */ /* enable the port manager and set auto negotiation always */
writel((u32) VAL1|EN_PMGR, mmio + CMD3 ); writel((u32)VAL1 | EN_PMGR, mmio + CMD3);
writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); writel((u32)XPHYANE | XPHYRST, mmio + CTRL2);
amd8111e_set_ext_phy(dev); amd8111e_set_ext_phy(dev);
/* set control registers */ /* set control registers */
reg_val = readl(mmio + CTRL1); reg_val = readl(mmio + CTRL1);
reg_val &= ~XMTSP_MASK; reg_val &= ~XMTSP_MASK;
writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 ); writel(reg_val | XMTSP_128 | CACHE_ALIGN, mmio + CTRL1);
/* enable interrupt */ /* enable interrupt */
writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | writel(APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN | APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0); SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0); writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
/* initialize tx and rx ring base addresses */ /* initialize tx and rx ring base addresses */
writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0); writel((u32)lp->tx_ring_dma_addr, mmio + XMT_RING_BASE_ADDR0);
writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0); writel((u32)lp->rx_ring_dma_addr, mmio + RCV_RING_BASE_ADDR0);
writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0); writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0); writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
/* set default IPG to 96 */ /* set default IPG to 96 */
writew((u32)DEFAULT_IPG,mmio+IPG); writew((u32)DEFAULT_IPG, mmio + IPG);
writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
if(lp->options & OPTION_JUMBO_ENABLE){ if (lp->options & OPTION_JUMBO_ENABLE) {
writel((u32)VAL2|JUMBO, mmio + CMD3); writel((u32)VAL2|JUMBO, mmio + CMD3);
/* Reset REX_UFLO */ /* Reset REX_UFLO */
writel( REX_UFLO, mmio + CMD2); writel(REX_UFLO, mmio + CMD2);
/* Should not set REX_UFLO for jumbo frames */ /* Should not set REX_UFLO for jumbo frames */
writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2); writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
}else{ } else {
writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2); writel(VAL0 | APAD_XMT | REX_RTRY | REX_UFLO, mmio + CMD2);
writel((u32)JUMBO, mmio + CMD3); writel((u32)JUMBO, mmio + CMD3);
} }
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3); writel((u32)VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif #endif
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 ); writel(VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2);
/* Setting the MAC address to the device */ /* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++) for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i ); writeb(dev->dev_addr[i], mmio + PADR + i);
/* Enable interrupt coalesce */ /* Enable interrupt coalesce */
if(lp->options & OPTION_INTR_COAL_ENABLE){ if (lp->options & OPTION_INTR_COAL_ENABLE) {
netdev_info(dev, "Interrupt Coalescing Enabled.\n"); netdev_info(dev, "Interrupt Coalescing Enabled.\n");
amd8111e_set_coalesce(dev,ENABLE_COAL); amd8111e_set_coalesce(dev, ENABLE_COAL);
} }
/* set RUN bit to start the chip */ /* set RUN bit to start the chip */
...@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev) ...@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
static void amd8111e_init_hw_default(struct amd8111e_priv *lp) static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
{ {
unsigned int reg_val; unsigned int reg_val;
unsigned int logic_filter[2] ={0,}; unsigned int logic_filter[2] = {0,};
void __iomem *mmio = lp->mmio; void __iomem *mmio = lp->mmio;
...@@ -519,13 +519,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) ...@@ -519,13 +519,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
writel(0, mmio + XMT_RING_BASE_ADDR3); writel(0, mmio + XMT_RING_BASE_ADDR3);
/* Clear CMD0 */ /* Clear CMD0 */
writel(CMD0_CLEAR,mmio + CMD0); writel(CMD0_CLEAR, mmio + CMD0);
/* Clear CMD2 */ /* Clear CMD2 */
writel(CMD2_CLEAR, mmio +CMD2); writel(CMD2_CLEAR, mmio + CMD2);
/* Clear CMD7 */ /* Clear CMD7 */
writel(CMD7_CLEAR , mmio + CMD7); writel(CMD7_CLEAR, mmio + CMD7);
/* Clear DLY_INT_A and DLY_INT_B */ /* Clear DLY_INT_A and DLY_INT_B */
writel(0x0, mmio + DLY_INT_A); writel(0x0, mmio + DLY_INT_A);
...@@ -542,13 +542,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) ...@@ -542,13 +542,13 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
writel(0x0, mmio + STVAL); writel(0x0, mmio + STVAL);
/* Clear INTEN0 */ /* Clear INTEN0 */
writel( INTEN0_CLEAR, mmio + INTEN0); writel(INTEN0_CLEAR, mmio + INTEN0);
/* Clear LADRF */ /* Clear LADRF */
writel(0x0 , mmio + LADRF); writel(0x0, mmio + LADRF);
/* Set SRAM_SIZE & SRAM_BOUNDARY registers */ /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
writel( 0x80010,mmio + SRAM_SIZE); writel(0x80010, mmio + SRAM_SIZE);
/* Clear RCV_RING0_LEN */ /* Clear RCV_RING0_LEN */
writel(0x0, mmio + RCV_RING_LEN0); writel(0x0, mmio + RCV_RING_LEN0);
...@@ -571,10 +571,10 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp) ...@@ -571,10 +571,10 @@ static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
/* SRAM_SIZE register */ /* SRAM_SIZE register */
reg_val = readl(mmio + SRAM_SIZE); reg_val = readl(mmio + SRAM_SIZE);
if(lp->options & OPTION_JUMBO_ENABLE) if (lp->options & OPTION_JUMBO_ENABLE)
writel( VAL2|JUMBO, mmio + CMD3); writel(VAL2 | JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 ); writel(VAL2 | VSIZE | VL_TAG_DEL, mmio + CMD3);
#endif #endif
/* Set default value to CTRL1 Register */ /* Set default value to CTRL1 Register */
writel(CTRL1_DEFAULT, mmio + CTRL1); writel(CTRL1_DEFAULT, mmio + CTRL1);
...@@ -616,14 +616,14 @@ static void amd8111e_stop_chip(struct amd8111e_priv *lp) ...@@ -616,14 +616,14 @@ static void amd8111e_stop_chip(struct amd8111e_priv *lp)
static void amd8111e_free_ring(struct amd8111e_priv *lp) static void amd8111e_free_ring(struct amd8111e_priv *lp)
{ {
/* Free transmit and receive descriptor rings */ /* Free transmit and receive descriptor rings */
if(lp->rx_ring){ if (lp->rx_ring) {
dma_free_coherent(&lp->pci_dev->dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring, lp->rx_ring_dma_addr);
lp->rx_ring = NULL; lp->rx_ring = NULL;
} }
if(lp->tx_ring){ if (lp->tx_ring) {
dma_free_coherent(&lp->pci_dev->dev, dma_free_coherent(&lp->pci_dev->dev,
sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring, lp->tx_ring_dma_addr);
...@@ -643,11 +643,11 @@ static int amd8111e_tx(struct net_device *dev) ...@@ -643,11 +643,11 @@ static int amd8111e_tx(struct net_device *dev)
int tx_index; int tx_index;
int status; int status;
/* Complete all the transmit packet */ /* Complete all the transmit packet */
while (lp->tx_complete_idx != lp->tx_idx){ while (lp->tx_complete_idx != lp->tx_idx) {
tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK; tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags); status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
if(status & OWN_BIT) if (status & OWN_BIT)
break; /* It still hasn't been Txed */ break; /* It still hasn't been Txed */
lp->tx_ring[tx_index].buff_phy_addr = 0; lp->tx_ring[tx_index].buff_phy_addr = 0;
...@@ -669,10 +669,10 @@ static int amd8111e_tx(struct net_device *dev) ...@@ -669,10 +669,10 @@ static int amd8111e_tx(struct net_device *dev)
le16_to_cpu(lp->tx_ring[tx_index].buff_count); le16_to_cpu(lp->tx_ring[tx_index].buff_count);
if (netif_queue_stopped(dev) && if (netif_queue_stopped(dev) &&
lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS + 2) {
/* The ring is no longer full, clear tbusy. */ /* The ring is no longer full, clear tbusy. */
/* lp->tx_full = 0; */ /* lp->tx_full = 0; */
netif_wake_queue (dev); netif_wake_queue(dev);
} }
} }
return 0; return 0;
...@@ -685,7 +685,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) ...@@ -685,7 +685,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
struct net_device *dev = lp->amd8111e_net_dev; struct net_device *dev = lp->amd8111e_net_dev;
int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK; int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
void __iomem *mmio = lp->mmio; void __iomem *mmio = lp->mmio;
struct sk_buff *skb,*new_skb; struct sk_buff *skb, *new_skb;
int min_pkt_len, status; int min_pkt_len, status;
int num_rx_pkt = 0; int num_rx_pkt = 0;
short pkt_len; short pkt_len;
...@@ -710,7 +710,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) ...@@ -710,7 +710,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
goto err_next_pkt; goto err_next_pkt;
} }
/* check for STP and ENP */ /* check for STP and ENP */
if (!((status & STP_BIT) && (status & ENP_BIT))){ if (!((status & STP_BIT) && (status & ENP_BIT))) {
/* resetting flags */ /* resetting flags */
lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
goto err_next_pkt; goto err_next_pkt;
...@@ -755,7 +755,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) ...@@ -755,7 +755,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev); skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
if (vtag == TT_VLAN_TAGGED){ if (vtag == TT_VLAN_TAGGED) {
u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
} }
...@@ -793,25 +793,25 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) ...@@ -793,25 +793,25 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
static int amd8111e_link_change(struct net_device *dev) static int amd8111e_link_change(struct net_device *dev)
{ {
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
int status0,speed; int status0, speed;
/* read the link change */ /* read the link change */
status0 = readl(lp->mmio + STAT0); status0 = readl(lp->mmio + STAT0);
if(status0 & LINK_STATS){ if (status0 & LINK_STATS) {
if(status0 & AUTONEG_COMPLETE) if (status0 & AUTONEG_COMPLETE)
lp->link_config.autoneg = AUTONEG_ENABLE; lp->link_config.autoneg = AUTONEG_ENABLE;
else else
lp->link_config.autoneg = AUTONEG_DISABLE; lp->link_config.autoneg = AUTONEG_DISABLE;
if(status0 & FULL_DPLX) if (status0 & FULL_DPLX)
lp->link_config.duplex = DUPLEX_FULL; lp->link_config.duplex = DUPLEX_FULL;
else else
lp->link_config.duplex = DUPLEX_HALF; lp->link_config.duplex = DUPLEX_HALF;
speed = (status0 & SPEED_MASK) >> 7; speed = (status0 & SPEED_MASK) >> 7;
if(speed == PHY_SPEED_10) if (speed == PHY_SPEED_10)
lp->link_config.speed = SPEED_10; lp->link_config.speed = SPEED_10;
else if(speed == PHY_SPEED_100) else if (speed == PHY_SPEED_100)
lp->link_config.speed = SPEED_100; lp->link_config.speed = SPEED_100;
netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n", netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
...@@ -821,8 +821,7 @@ static int amd8111e_link_change(struct net_device *dev) ...@@ -821,8 +821,7 @@ static int amd8111e_link_change(struct net_device *dev)
"Full" : "Half"); "Full" : "Half");
netif_carrier_on(dev); netif_carrier_on(dev);
} } else {
else{
lp->link_config.speed = SPEED_INVALID; lp->link_config.speed = SPEED_INVALID;
lp->link_config.duplex = DUPLEX_INVALID; lp->link_config.duplex = DUPLEX_INVALID;
lp->link_config.autoneg = AUTONEG_INVALID; lp->link_config.autoneg = AUTONEG_INVALID;
...@@ -840,7 +839,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER) ...@@ -840,7 +839,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
unsigned int data; unsigned int data;
unsigned int repeat = REPEAT_CNT; unsigned int repeat = REPEAT_CNT;
writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR); writew(MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
do { do {
status = readw(mmio + MIB_ADDR); status = readw(mmio + MIB_ADDR);
udelay(2); /* controller takes MAX 2 us to get mib data */ udelay(2); /* controller takes MAX 2 us to get mib data */
...@@ -863,7 +862,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) ...@@ -863,7 +862,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
if (!lp->opened) if (!lp->opened)
return new_stats; return new_stats;
spin_lock_irqsave (&lp->lock, flags); spin_lock_irqsave(&lp->lock, flags);
/* stats.rx_packets */ /* stats.rx_packets */
new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
...@@ -943,7 +942,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev) ...@@ -943,7 +942,7 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
/* Reset the mibs for collecting new statistics */ /* Reset the mibs for collecting new statistics */
/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
spin_unlock_irqrestore (&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
return new_stats; return new_stats;
} }
...@@ -974,96 +973,90 @@ static int amd8111e_calc_coalesce(struct net_device *dev) ...@@ -974,96 +973,90 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes; rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
coal_conf->rx_prev_bytes = coal_conf->rx_bytes; coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
if(rx_pkt_rate < 800){ if (rx_pkt_rate < 800) {
if(coal_conf->rx_coal_type != NO_COALESCE){ if (coal_conf->rx_coal_type != NO_COALESCE) {
coal_conf->rx_timeout = 0x0; coal_conf->rx_timeout = 0x0;
coal_conf->rx_event_count = 0; coal_conf->rx_event_count = 0;
amd8111e_set_coalesce(dev,RX_INTR_COAL); amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE; coal_conf->rx_coal_type = NO_COALESCE;
} }
} } else {
else{
rx_pkt_size = rx_data_rate/rx_pkt_rate; rx_pkt_size = rx_data_rate/rx_pkt_rate;
if (rx_pkt_size < 128){ if (rx_pkt_size < 128) {
if(coal_conf->rx_coal_type != NO_COALESCE){ if (coal_conf->rx_coal_type != NO_COALESCE) {
coal_conf->rx_timeout = 0; coal_conf->rx_timeout = 0;
coal_conf->rx_event_count = 0; coal_conf->rx_event_count = 0;
amd8111e_set_coalesce(dev,RX_INTR_COAL); amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = NO_COALESCE; coal_conf->rx_coal_type = NO_COALESCE;
} }
} } else if ((rx_pkt_size >= 128) && (rx_pkt_size < 512)) {
else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
if(coal_conf->rx_coal_type != LOW_COALESCE){ if (coal_conf->rx_coal_type != LOW_COALESCE) {
coal_conf->rx_timeout = 1; coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4; coal_conf->rx_event_count = 4;
amd8111e_set_coalesce(dev,RX_INTR_COAL); amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = LOW_COALESCE; coal_conf->rx_coal_type = LOW_COALESCE;
} }
} } else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)) {
else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
if(coal_conf->rx_coal_type != MEDIUM_COALESCE){ if (coal_conf->rx_coal_type != MEDIUM_COALESCE) {
coal_conf->rx_timeout = 1; coal_conf->rx_timeout = 1;
coal_conf->rx_event_count = 4; coal_conf->rx_event_count = 4;
amd8111e_set_coalesce(dev,RX_INTR_COAL); amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = MEDIUM_COALESCE; coal_conf->rx_coal_type = MEDIUM_COALESCE;
} }
} } else if (rx_pkt_size >= 1024) {
else if(rx_pkt_size >= 1024){
if(coal_conf->rx_coal_type != HIGH_COALESCE){ if (coal_conf->rx_coal_type != HIGH_COALESCE) {
coal_conf->rx_timeout = 2; coal_conf->rx_timeout = 2;
coal_conf->rx_event_count = 3; coal_conf->rx_event_count = 3;
amd8111e_set_coalesce(dev,RX_INTR_COAL); amd8111e_set_coalesce(dev, RX_INTR_COAL);
coal_conf->rx_coal_type = HIGH_COALESCE; coal_conf->rx_coal_type = HIGH_COALESCE;
} }
} }
} }
/* NOW FOR TX INTR COALESC */ /* NOW FOR TX INTR COALESC */
if(tx_pkt_rate < 800){ if (tx_pkt_rate < 800) {
if(coal_conf->tx_coal_type != NO_COALESCE){ if (coal_conf->tx_coal_type != NO_COALESCE) {
coal_conf->tx_timeout = 0x0; coal_conf->tx_timeout = 0x0;
coal_conf->tx_event_count = 0; coal_conf->tx_event_count = 0;
amd8111e_set_coalesce(dev,TX_INTR_COAL); amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE; coal_conf->tx_coal_type = NO_COALESCE;
} }
} } else {
else{
tx_pkt_size = tx_data_rate/tx_pkt_rate; tx_pkt_size = tx_data_rate/tx_pkt_rate;
if (tx_pkt_size < 128){ if (tx_pkt_size < 128) {
if(coal_conf->tx_coal_type != NO_COALESCE){ if (coal_conf->tx_coal_type != NO_COALESCE) {
coal_conf->tx_timeout = 0; coal_conf->tx_timeout = 0;
coal_conf->tx_event_count = 0; coal_conf->tx_event_count = 0;
amd8111e_set_coalesce(dev,TX_INTR_COAL); amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = NO_COALESCE; coal_conf->tx_coal_type = NO_COALESCE;
} }
} } else if ((tx_pkt_size >= 128) && (tx_pkt_size < 512)) {
else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
if(coal_conf->tx_coal_type != LOW_COALESCE){ if (coal_conf->tx_coal_type != LOW_COALESCE) {
coal_conf->tx_timeout = 1; coal_conf->tx_timeout = 1;
coal_conf->tx_event_count = 2; coal_conf->tx_event_count = 2;
amd8111e_set_coalesce(dev,TX_INTR_COAL); amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = LOW_COALESCE; coal_conf->tx_coal_type = LOW_COALESCE;
} }
} } else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)) {
else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
if(coal_conf->tx_coal_type != MEDIUM_COALESCE){ if (coal_conf->tx_coal_type != MEDIUM_COALESCE) {
coal_conf->tx_timeout = 2; coal_conf->tx_timeout = 2;
coal_conf->tx_event_count = 5; coal_conf->tx_event_count = 5;
amd8111e_set_coalesce(dev,TX_INTR_COAL); amd8111e_set_coalesce(dev, TX_INTR_COAL);
coal_conf->tx_coal_type = MEDIUM_COALESCE; coal_conf->tx_coal_type = MEDIUM_COALESCE;
} }
} else if (tx_pkt_size >= 1024) { } else if (tx_pkt_size >= 1024) {
...@@ -1091,7 +1084,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) ...@@ -1091,7 +1084,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0; unsigned int intr0, intren0;
unsigned int handled = 1; unsigned int handled = 1;
if(unlikely(dev == NULL)) if (unlikely(dev == NULL))
return IRQ_NONE; return IRQ_NONE;
spin_lock(&lp->lock); spin_lock(&lp->lock);
...@@ -1105,7 +1098,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) ...@@ -1105,7 +1098,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Process all the INT event until INTR bit is clear. */ /* Process all the INT event until INTR bit is clear. */
if (!(intr0 & INTR)){ if (!(intr0 & INTR)) {
handled = 0; handled = 0;
goto err_no_interrupt; goto err_no_interrupt;
} }
...@@ -1140,7 +1133,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id) ...@@ -1140,7 +1133,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
amd8111e_calc_coalesce(dev); amd8111e_calc_coalesce(dev);
err_no_interrupt: err_no_interrupt:
writel( VAL0 | INTREN,mmio + CMD0); writel(VAL0 | INTREN, mmio + CMD0);
spin_unlock(&lp->lock); spin_unlock(&lp->lock);
...@@ -1180,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev) ...@@ -1180,7 +1173,7 @@ static int amd8111e_close(struct net_device *dev)
netif_carrier_off(lp->amd8111e_net_dev); netif_carrier_off(lp->amd8111e_net_dev);
/* Delete ipg timer */ /* Delete ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE) if (lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer); del_timer_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
...@@ -1200,8 +1193,8 @@ static int amd8111e_open(struct net_device *dev) ...@@ -1200,8 +1193,8 @@ static int amd8111e_open(struct net_device *dev)
{ {
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED, if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
dev->name, dev)) IRQF_SHARED, dev->name, dev))
return -EAGAIN; return -EAGAIN;
napi_enable(&lp->napi); napi_enable(&lp->napi);
...@@ -1210,7 +1203,7 @@ static int amd8111e_open(struct net_device *dev) ...@@ -1210,7 +1203,7 @@ static int amd8111e_open(struct net_device *dev)
amd8111e_init_hw_default(lp); amd8111e_init_hw_default(lp);
if(amd8111e_restart(dev)){ if (amd8111e_restart(dev)) {
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
napi_disable(&lp->napi); napi_disable(&lp->napi);
if (dev->irq) if (dev->irq)
...@@ -1218,7 +1211,7 @@ static int amd8111e_open(struct net_device *dev) ...@@ -1218,7 +1211,7 @@ static int amd8111e_open(struct net_device *dev)
return -ENOMEM; return -ENOMEM;
} }
/* Start ipg timer */ /* Start ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){ if (lp->options & OPTION_DYN_IPG_ENABLE) {
add_timer(&lp->ipg_data.ipg_timer); add_timer(&lp->ipg_data.ipg_timer);
netdev_info(dev, "Dynamic IPG Enabled\n"); netdev_info(dev, "Dynamic IPG Enabled\n");
} }
...@@ -1289,10 +1282,10 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, ...@@ -1289,10 +1282,10 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
lp->tx_idx++; lp->tx_idx++;
/* Trigger an immediate send poll. */ /* Trigger an immediate send poll. */
writel( VAL1 | TDMD0, lp->mmio + CMD0); writel(VAL1 | TDMD0, lp->mmio + CMD0);
writel( VAL2 | RDMD0,lp->mmio + CMD0); writel(VAL2 | RDMD0, lp->mmio + CMD0);
if(amd8111e_tx_queue_avail(lp) < 0){ if (amd8111e_tx_queue_avail(lp) < 0) {
netif_stop_queue(dev); netif_stop_queue(dev);
} }
spin_unlock_irqrestore(&lp->lock, flags); spin_unlock_irqrestore(&lp->lock, flags);
...@@ -1326,15 +1319,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev) ...@@ -1326,15 +1319,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
{ {
struct netdev_hw_addr *ha; struct netdev_hw_addr *ha;
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ; u32 mc_filter[2];
int bit_num; int bit_num;
if(dev->flags & IFF_PROMISC){ if (dev->flags & IFF_PROMISC) {
writel( VAL2 | PROM, lp->mmio + CMD2); writel(VAL2 | PROM, lp->mmio + CMD2);
return; return;
} }
else else
writel( PROM, lp->mmio + CMD2); writel(PROM, lp->mmio + CMD2);
if (dev->flags & IFF_ALLMULTI || if (dev->flags & IFF_ALLMULTI ||
netdev_mc_count(dev) > MAX_FILTER_SIZE) { netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */ /* get all multicast packet */
...@@ -1439,7 +1432,7 @@ static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_ ...@@ -1439,7 +1432,7 @@ static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_
if (wol_info->wolopts & WAKE_MAGIC) if (wol_info->wolopts & WAKE_MAGIC)
lp->options |= lp->options |=
(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE); (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
else if(wol_info->wolopts & WAKE_PHY) else if (wol_info->wolopts & WAKE_PHY)
lp->options |= lp->options |=
(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE); (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
else else
...@@ -1464,14 +1457,14 @@ static const struct ethtool_ops ops = { ...@@ -1464,14 +1457,14 @@ static const struct ethtool_ops ops = {
* gets/sets driver speed, gets memory mapped register values, forces * gets/sets driver speed, gets memory mapped register values, forces
* auto negotiation, sets/gets WOL options for ethtool application. * auto negotiation, sets/gets WOL options for ethtool application.
*/ */
static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd) static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{ {
struct mii_ioctl_data *data = if_mii(ifr); struct mii_ioctl_data *data = if_mii(ifr);
struct amd8111e_priv *lp = netdev_priv(dev); struct amd8111e_priv *lp = netdev_priv(dev);
int err; int err;
u32 mii_regval; u32 mii_regval;
switch(cmd) { switch (cmd) {
case SIOCGMIIPHY: case SIOCGMIIPHY:
data->phy_id = lp->ext_phy_addr; data->phy_id = lp->ext_phy_addr;
...@@ -1511,7 +1504,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p) ...@@ -1511,7 +1504,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */ /* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++) for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], lp->mmio + PADR + i ); writeb(dev->dev_addr[i], lp->mmio + PADR + i);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
...@@ -1543,15 +1536,15 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu) ...@@ -1543,15 +1536,15 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
err = amd8111e_restart(dev); err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
if(!err) if (!err)
netif_start_queue(dev); netif_start_queue(dev);
return err; return err;
} }
static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp) static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
{ {
writel( VAL1|MPPLBA, lp->mmio + CMD3); writel(VAL1 | MPPLBA, lp->mmio + CMD3);
writel( VAL0|MPEN_SW, lp->mmio + CMD7); writel(VAL0 | MPEN_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */ /* To eliminate PCI posting bug */
readl(lp->mmio + CMD7); readl(lp->mmio + CMD7);
...@@ -1562,7 +1555,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp) ...@@ -1562,7 +1555,7 @@ static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{ {
/* Adapter is already stoped/suspended/interrupt-disabled */ /* Adapter is already stoped/suspended/interrupt-disabled */
writel(VAL0|LCMODE_SW,lp->mmio + CMD7); writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */ /* To eliminate PCI posting bug */
readl(lp->mmio + CMD7); readl(lp->mmio + CMD7);
...@@ -1584,7 +1577,7 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue) ...@@ -1584,7 +1577,7 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
err = amd8111e_restart(dev); err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
if(!err) if (!err)
netif_wake_queue(dev); netif_wake_queue(dev);
} }
...@@ -1605,22 +1598,21 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d) ...@@ -1605,22 +1598,21 @@ static int __maybe_unused amd8111e_suspend(struct device *dev_d)
/* stop chip */ /* stop chip */
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
if(lp->options & OPTION_DYN_IPG_ENABLE) if (lp->options & OPTION_DYN_IPG_ENABLE)
del_timer_sync(&lp->ipg_data.ipg_timer); del_timer_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp); amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
if(lp->options & OPTION_WOL_ENABLE){ if (lp->options & OPTION_WOL_ENABLE) {
/* enable wol */ /* enable wol */
if(lp->options & OPTION_WAKE_MAGIC_ENABLE) if (lp->options & OPTION_WAKE_MAGIC_ENABLE)
amd8111e_enable_magicpkt(lp); amd8111e_enable_magicpkt(lp);
if(lp->options & OPTION_WAKE_PHY_ENABLE) if (lp->options & OPTION_WAKE_PHY_ENABLE)
amd8111e_enable_link_change(lp); amd8111e_enable_link_change(lp);
device_set_wakeup_enable(dev_d, 1); device_set_wakeup_enable(dev_d, 1);
} } else {
else{
device_set_wakeup_enable(dev_d, 0); device_set_wakeup_enable(dev_d, 0);
} }
...@@ -1640,7 +1632,7 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d) ...@@ -1640,7 +1632,7 @@ static int __maybe_unused amd8111e_resume(struct device *dev_d)
spin_lock_irq(&lp->lock); spin_lock_irq(&lp->lock);
amd8111e_restart(dev); amd8111e_restart(dev);
/* Restart ipg timer */ /* Restart ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE) if (lp->options & OPTION_DYN_IPG_ENABLE)
mod_timer(&lp->ipg_data.ipg_timer, mod_timer(&lp->ipg_data.ipg_timer,
jiffies + IPG_CONVERGE_JIFFIES); jiffies + IPG_CONVERGE_JIFFIES);
spin_unlock_irq(&lp->lock); spin_unlock_irq(&lp->lock);
...@@ -1657,14 +1649,14 @@ static void amd8111e_config_ipg(struct timer_list *t) ...@@ -1657,14 +1649,14 @@ static void amd8111e_config_ipg(struct timer_list *t)
unsigned int total_col_cnt; unsigned int total_col_cnt;
unsigned int tmp_ipg; unsigned int tmp_ipg;
if(lp->link_config.duplex == DUPLEX_FULL){ if (lp->link_config.duplex == DUPLEX_FULL) {
ipg_data->ipg = DEFAULT_IPG; ipg_data->ipg = DEFAULT_IPG;
return; return;
} }
if(ipg_data->ipg_state == SSTATE){ if (ipg_data->ipg_state == SSTATE) {
if(ipg_data->timer_tick == IPG_STABLE_TIME){ if (ipg_data->timer_tick == IPG_STABLE_TIME) {
ipg_data->timer_tick = 0; ipg_data->timer_tick = 0;
ipg_data->ipg = MIN_IPG - IPG_STEP; ipg_data->ipg = MIN_IPG - IPG_STEP;
...@@ -1676,7 +1668,7 @@ static void amd8111e_config_ipg(struct timer_list *t) ...@@ -1676,7 +1668,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
ipg_data->timer_tick++; ipg_data->timer_tick++;
} }
if(ipg_data->ipg_state == CSTATE){ if (ipg_data->ipg_state == CSTATE) {
/* Get the current collision count */ /* Get the current collision count */
...@@ -1684,10 +1676,10 @@ static void amd8111e_config_ipg(struct timer_list *t) ...@@ -1684,10 +1676,10 @@ static void amd8111e_config_ipg(struct timer_list *t)
amd8111e_read_mib(mmio, xmt_collisions); amd8111e_read_mib(mmio, xmt_collisions);
if ((total_col_cnt - prev_col_cnt) < if ((total_col_cnt - prev_col_cnt) <
(ipg_data->diff_col_cnt)){ (ipg_data->diff_col_cnt)) {
ipg_data->diff_col_cnt = ipg_data->diff_col_cnt =
total_col_cnt - prev_col_cnt ; total_col_cnt - prev_col_cnt;
ipg_data->ipg = ipg_data->current_ipg; ipg_data->ipg = ipg_data->current_ipg;
} }
...@@ -1696,7 +1688,7 @@ static void amd8111e_config_ipg(struct timer_list *t) ...@@ -1696,7 +1688,7 @@ static void amd8111e_config_ipg(struct timer_list *t)
if (ipg_data->current_ipg <= MAX_IPG) if (ipg_data->current_ipg <= MAX_IPG)
tmp_ipg = ipg_data->current_ipg; tmp_ipg = ipg_data->current_ipg;
else{ else {
tmp_ipg = ipg_data->ipg; tmp_ipg = ipg_data->ipg;
ipg_data->ipg_state = SSTATE; ipg_data->ipg_state = SSTATE;
} }
...@@ -1748,24 +1740,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1748,24 +1740,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
int err, i; int err, i;
unsigned long reg_addr,reg_len; unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp; struct amd8111e_priv *lp;
struct net_device *dev; struct net_device *dev;
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
if(err){ if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n"); dev_err(&pdev->dev, "Cannot enable new PCI device\n");
return err; return err;
} }
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find PCI base address\n"); dev_err(&pdev->dev, "Cannot find PCI base address\n");
err = -ENODEV; err = -ENODEV;
goto err_disable_pdev; goto err_disable_pdev;
} }
err = pci_request_regions(pdev, MODULE_NAME); err = pci_request_regions(pdev, MODULE_NAME);
if(err){ if (err) {
dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev; goto err_disable_pdev;
} }
...@@ -1798,7 +1790,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1798,7 +1790,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED #if AMD8111E_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
#endif #endif
lp = netdev_priv(dev); lp = netdev_priv(dev);
...@@ -1821,16 +1813,16 @@ static int amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1821,16 +1813,16 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Setting user defined parametrs */ /* Setting user defined parametrs */
lp->ext_phy_option = speed_duplex[card_idx]; lp->ext_phy_option = speed_duplex[card_idx];
if(coalesce[card_idx]) if (coalesce[card_idx])
lp->options |= OPTION_INTR_COAL_ENABLE; lp->options |= OPTION_INTR_COAL_ENABLE;
if(dynamic_ipg[card_idx++]) if (dynamic_ipg[card_idx++])
lp->options |= OPTION_DYN_IPG_ENABLE; lp->options |= OPTION_DYN_IPG_ENABLE;
/* Initialize driver entry points */ /* Initialize driver entry points */
dev->netdev_ops = &amd8111e_netdev_ops; dev->netdev_ops = &amd8111e_netdev_ops;
dev->ethtool_ops = &ops; dev->ethtool_ops = &ops;
dev->irq =pdev->irq; dev->irq = pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU; dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU; dev->max_mtu = AMD8111E_MAX_MTU;
...@@ -1861,7 +1853,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1861,7 +1853,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev); pci_set_drvdata(pdev, dev);
/* Initialize software ipg timer */ /* Initialize software ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){ if (lp->options & OPTION_DYN_IPG_ENABLE) {
timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0); timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
lp->ipg_data.ipg_timer.expires = jiffies + lp->ipg_data.ipg_timer.expires = jiffies +
IPG_CONVERGE_JIFFIES; IPG_CONVERGE_JIFFIES;
...@@ -1870,7 +1862,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, ...@@ -1870,7 +1862,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
} }
/* display driver and device information */ /* display driver and device information */
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000) >> 28;
dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n", dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
chip_version, dev->dev_addr); chip_version, dev->dev_addr);
if (lp->ext_phy_id) if (lp->ext_phy_id)
......
...@@ -170,6 +170,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d) ...@@ -170,6 +170,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
static void hplance_writerap(void *priv, unsigned short value) static void hplance_writerap(void *priv, unsigned short value)
{ {
struct lance_private *lp = (struct lance_private *)priv; struct lance_private *lp = (struct lance_private *)priv;
do { do {
out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
...@@ -178,6 +179,7 @@ static void hplance_writerap(void *priv, unsigned short value) ...@@ -178,6 +179,7 @@ static void hplance_writerap(void *priv, unsigned short value)
static void hplance_writerdp(void *priv, unsigned short value) static void hplance_writerdp(void *priv, unsigned short value)
{ {
struct lance_private *lp = (struct lance_private *)priv; struct lance_private *lp = (struct lance_private *)priv;
do { do {
out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
...@@ -187,6 +189,7 @@ static unsigned short hplance_readrdp(void *priv) ...@@ -187,6 +189,7 @@ static unsigned short hplance_readrdp(void *priv)
{ {
struct lance_private *lp = (struct lance_private *)priv; struct lance_private *lp = (struct lance_private *)priv;
__u16 value; __u16 value;
do { do {
value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
......
...@@ -484,7 +484,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, ...@@ -484,7 +484,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
* reset */ * reset
*/
ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed), ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
DEV_CLOCK_CFG); DEV_CLOCK_CFG);
......
...@@ -1044,7 +1044,8 @@ static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb, ...@@ -1044,7 +1044,8 @@ static netdev_tx_t lpc_eth_hard_start_xmit(struct sk_buff *skb,
if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) { if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
/* This function should never be called when there are no /* This function should never be called when there are no
buffers */ * buffers
*/
netif_stop_queue(ndev); netif_stop_queue(ndev);
spin_unlock_irq(&pldat->lock); spin_unlock_irq(&pldat->lock);
WARN(1, "BUG! TX request when no free TX buffers!\n"); WARN(1, "BUG! TX request when no free TX buffers!\n");
...@@ -1318,7 +1319,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) ...@@ -1318,7 +1319,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size); pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
/* Allocate a chunk of memory for the DMA ethernet buffers /* Allocate a chunk of memory for the DMA ethernet buffers
and descriptors */ * and descriptors
*/
pldat->dma_buff_base_v = pldat->dma_buff_base_v =
dma_alloc_coherent(dev, dma_alloc_coherent(dev,
pldat->dma_buff_size, &dma_handle, pldat->dma_buff_size, &dma_handle,
...@@ -1365,7 +1367,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) ...@@ -1365,7 +1367,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_mii_mngt_reset(pldat); __lpc_mii_mngt_reset(pldat);
/* Force default PHY interface setup in chip, this will probably be /* Force default PHY interface setup in chip, this will probably be
changed by the PHY driver */ * changed by the PHY driver
*/
pldat->link = 0; pldat->link = 0;
pldat->speed = 100; pldat->speed = 100;
pldat->duplex = DUPLEX_FULL; pldat->duplex = DUPLEX_FULL;
......
...@@ -146,7 +146,8 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) ...@@ -146,7 +146,8 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
* interrupt, as we poll for the completion of the read operation * interrupt, as we poll for the completion of the read operation
* in spider_net_read_phy. Should take about 50 us */ * in spider_net_read_phy. Should take about 50 us
*/
do { do {
readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD); readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
} while (readvalue & SPIDER_NET_GPREXEC); } while (readvalue & SPIDER_NET_GPREXEC);
...@@ -387,7 +388,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, ...@@ -387,7 +388,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
(~(SPIDER_NET_RXBUF_ALIGN - 1)); (~(SPIDER_NET_RXBUF_ALIGN - 1));
/* and we need to have it 128 byte aligned, therefore we allocate a /* and we need to have it 128 byte aligned, therefore we allocate a
* bit more */ * bit more
*/
/* allocate an skb */ /* allocate an skb */
descr->skb = netdev_alloc_skb(card->netdev, descr->skb = netdev_alloc_skb(card->netdev,
bufsize + SPIDER_NET_RXBUF_ALIGN - 1); bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
...@@ -488,7 +490,8 @@ spider_net_refill_rx_chain(struct spider_net_card *card) ...@@ -488,7 +490,8 @@ spider_net_refill_rx_chain(struct spider_net_card *card)
/* one context doing the refill (and a second context seeing that /* one context doing the refill (and a second context seeing that
* and omitting it) is ok. If called by NAPI, we'll be called again * and omitting it) is ok. If called by NAPI, we'll be called again
* as spider_net_decode_one_descr is called several times. If some * as spider_net_decode_one_descr is called several times. If some
* interrupt calls us, the NAPI is about to clean up anyway. */ * interrupt calls us, the NAPI is about to clean up anyway.
*/
if (!spin_trylock_irqsave(&chain->lock, flags)) if (!spin_trylock_irqsave(&chain->lock, flags))
return; return;
...@@ -523,14 +526,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card) ...@@ -523,14 +526,16 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
/* Put at least one buffer into the chain. if this fails, /* Put at least one buffer into the chain. if this fails,
* we've got a problem. If not, spider_net_refill_rx_chain * we've got a problem. If not, spider_net_refill_rx_chain
* will do the rest at the end of this function. */ * will do the rest at the end of this function.
*/
if (spider_net_prepare_rx_descr(card, chain->head)) if (spider_net_prepare_rx_descr(card, chain->head))
goto error; goto error;
else else
chain->head = chain->head->next; chain->head = chain->head->next;
/* This will allocate the rest of the rx buffers; /* This will allocate the rest of the rx buffers;
* if not, it's business as usual later on. */ * if not, it's business as usual later on.
*/
spider_net_refill_rx_chain(card); spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card); spider_net_enable_rxdmac(card);
return 0; return 0;
...@@ -706,7 +711,8 @@ spider_net_set_low_watermark(struct spider_net_card *card) ...@@ -706,7 +711,8 @@ spider_net_set_low_watermark(struct spider_net_card *card)
int i; int i;
/* Measure the length of the queue. Measurement does not /* Measure the length of the queue. Measurement does not
* need to be precise -- does not need a lock. */ * need to be precise -- does not need a lock.
*/
while (descr != card->tx_chain.head) { while (descr != card->tx_chain.head) {
status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
if (status == SPIDER_NET_DESCR_NOT_IN_USE) if (status == SPIDER_NET_DESCR_NOT_IN_USE)
...@@ -786,7 +792,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) ...@@ -786,7 +792,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/* fallthrough, if we release the descriptors /* fallthrough, if we release the descriptors
* brutally (then we don't care about * brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */ * SPIDER_NET_DESCR_CARDOWNED)
*/
fallthrough; fallthrough;
case SPIDER_NET_DESCR_RESPONSE_ERROR: case SPIDER_NET_DESCR_RESPONSE_ERROR:
...@@ -948,7 +955,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, ...@@ -948,7 +955,8 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
skb_put(skb, hwdescr->valid_size); skb_put(skb, hwdescr->valid_size);
/* the card seems to add 2 bytes of junk in front /* the card seems to add 2 bytes of junk in front
* of the ethernet frame */ * of the ethernet frame
*/
#define SPIDER_MISALIGN 2 #define SPIDER_MISALIGN 2
skb_pull(skb, SPIDER_MISALIGN); skb_pull(skb, SPIDER_MISALIGN);
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
...@@ -1382,7 +1390,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, ...@@ -1382,7 +1390,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
/* PHY read operation completed */ /* PHY read operation completed */
/* we don't use semaphores, as we poll for the completion /* we don't use semaphores, as we poll for the completion
* of the read operation in spider_net_read_phy. Should take * of the read operation in spider_net_read_phy. Should take
* about 50 us */ * about 50 us
*/
show_error = 0; show_error = 0;
break; break;
case SPIDER_NET_GPWFFINT: case SPIDER_NET_GPWFFINT:
...@@ -1450,7 +1459,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg, ...@@ -1450,7 +1459,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
{ {
case SPIDER_NET_GTMFLLINT: case SPIDER_NET_GTMFLLINT:
/* TX RAM full may happen on a usual case. /* TX RAM full may happen on a usual case.
* Logging is not needed. */ * Logging is not needed.
*/
show_error = 0; show_error = 0;
break; break;
case SPIDER_NET_GRFDFLLINT: case SPIDER_NET_GRFDFLLINT:
...@@ -1694,7 +1704,8 @@ spider_net_enable_card(struct spider_net_card *card) ...@@ -1694,7 +1704,8 @@ spider_net_enable_card(struct spider_net_card *card)
{ {
int i; int i;
/* the following array consists of (register),(value) pairs /* the following array consists of (register),(value) pairs
* that are set in this function. A register of 0 ends the list */ * that are set in this function. A register of 0 ends the list
*/
u32 regs[][2] = { u32 regs[][2] = {
{ SPIDER_NET_GRESUMINTNUM, 0 }, { SPIDER_NET_GRESUMINTNUM, 0 },
{ SPIDER_NET_GREINTNUM, 0 }, { SPIDER_NET_GREINTNUM, 0 },
...@@ -1757,7 +1768,8 @@ spider_net_enable_card(struct spider_net_card *card) ...@@ -1757,7 +1768,8 @@ spider_net_enable_card(struct spider_net_card *card)
spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE); spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
/* set chain tail address for RX chains and /* set chain tail address for RX chains and
* enable DMA */ * enable DMA
*/
spider_net_enable_rxchtails(card); spider_net_enable_rxchtails(card);
spider_net_enable_rxdmac(card); spider_net_enable_rxdmac(card);
...@@ -1995,7 +2007,8 @@ static void spider_net_link_phy(struct timer_list *t) ...@@ -1995,7 +2007,8 @@ static void spider_net_link_phy(struct timer_list *t)
case BCM54XX_UNKNOWN: case BCM54XX_UNKNOWN:
/* copper, fiber with and without failed, /* copper, fiber with and without failed,
* retry from beginning */ * retry from beginning
*/
spider_net_setup_aneg(card); spider_net_setup_aneg(card);
card->medium = BCM54XX_COPPER; card->medium = BCM54XX_COPPER;
break; break;
...@@ -2263,7 +2276,8 @@ spider_net_setup_netdev(struct spider_net_card *card) ...@@ -2263,7 +2276,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
netdev->features |= NETIF_F_RXCSUM; netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX; netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | /* some time: NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
* NETIF_F_HW_VLAN_CTAG_FILTER */ * NETIF_F_HW_VLAN_CTAG_FILTER
*/
/* MTU range: 64 - 2294 */ /* MTU range: 64 - 2294 */
netdev->min_mtu = SPIDER_NET_MIN_MTU; netdev->min_mtu = SPIDER_NET_MIN_MTU;
......
...@@ -1914,7 +1914,8 @@ tc35815_set_multicast_list(struct net_device *dev) ...@@ -1914,7 +1914,8 @@ tc35815_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { if (dev->flags & IFF_PROMISC) {
/* With some (all?) 100MHalf HUB, controller will hang /* With some (all?) 100MHalf HUB, controller will hang
* if we enabled promiscuous mode before linkup... */ * if we enabled promiscuous mode before linkup...
*/
struct tc35815_local *lp = netdev_priv(dev); struct tc35815_local *lp = netdev_priv(dev);
if (!lp->link) if (!lp->link)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment